Dataset schema (five fields per record):

    code                     string   87 to 55.2k characters
    code_codestyle           int64    0 to 349
    style_context            string   135 to 49.1k characters
    style_context_codestyle  int64    0 to 349
    label                    int64    0 to 1
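For orientation, here is a minimal sketch of loading and inspecting one record of a dataset with this schema via the Hugging Face datasets library. The dataset path is a placeholder, and reading label as a style-match indicator is an assumption; neither is confirmed by the dump itself.

    # Hypothetical sketch -- the dataset path below is a placeholder, not the real identifier.
    from datasets import load_dataset

    ds = load_dataset("org/code-style-pairs", split="train")  # placeholder path

    row = ds[0]
    print(row["code"][:200])               # first 200 characters of the code sample
    print(row["code_codestyle"])           # style class id of the code sample, 0 to 349
    print(row["style_context_codestyle"])  # style class id of the context snippet
    print(row["label"])                    # 0 or 1; presumably whether the two styles match

The records follow, field by field, in the order: code, code_codestyle, style_context, style_context_codestyle, label.

Sample 1

code: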
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def __lowerCamelCase ( __a :str , __a :List[str] , __a :List[str] ) -> Optional[Any]: """simple docstring""" if isinstance(__a , torch.Tensor ): return image elif isinstance(__a , PIL.Image.Image ): A__ = [image] if isinstance(image[0] , PIL.Image.Image ): A__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] A__ = np.concatenate(__a , axis=0 ) A__ = np.array(__a ).astype(np.floataa ) / 255.0 A__ = image.transpose(0 , 3 , 1 , 2 ) A__ = 2.0 * image - 1.0 A__ = torch.from_numpy(__a ) elif isinstance(image[0] , torch.Tensor ): A__ = torch.cat(__a , dim=0 ) return image def __lowerCamelCase ( __a :Union[str, Any] , __a :Optional[int] , __a :Optional[Any] , __a :Optional[Any]=0.9995 ) -> int: """simple docstring""" if not isinstance(__a , np.ndarray ): A__ = True A__ = va.device A__ = va.cpu().numpy() A__ = va.cpu().numpy() A__ = np.sum(va * va / (np.linalg.norm(__a ) * np.linalg.norm(__a )) ) if np.abs(__a ) > DOT_THRESHOLD: A__ = (1 - t) * va + t * va else: A__ = np.arccos(__a ) A__ = np.sin(__a ) A__ = theta_a * t A__ = np.sin(__a ) A__ = np.sin(theta_a - theta_t ) / sin_theta_a A__ = sin_theta_t / sin_theta_a A__ = sa * va + sa * va if inputs_are_torch: A__ = torch.from_numpy(__a ).to(__a ) return va def __lowerCamelCase ( __a :int , __a :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" A__ = F.normalize(__a , dim=-1 ) A__ = F.normalize(__a , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def __lowerCamelCase ( __a :Any , __a :Optional[int] ) -> int: """simple docstring""" for param in model.parameters(): A__ = value class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Union[str, Any] , __lowerCAmelCase : AutoencoderKL , __lowerCAmelCase : CLIPTextModel , __lowerCAmelCase : CLIPModel , __lowerCAmelCase : CLIPTokenizer , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __lowerCAmelCase : CLIPFeatureExtractor , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[str]=None , ) -> str: """simple docstring""" super().__init__() self.register_modules( vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , clip_model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , coca_model=__lowerCAmelCase , coca_tokenizer=__lowerCAmelCase , coca_transform=__lowerCAmelCase , ) A__ = ( feature_extractor.size if isinstance(feature_extractor.size , __lowerCAmelCase ) else feature_extractor.size["""shortest_edge"""] ) A__ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , __lowerCAmelCase ) set_requires_grad(self.clip_model , __lowerCAmelCase ) def a_ ( self : int , __lowerCAmelCase : Optional[Union[str, int]] = "auto" ) -> 
Tuple: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory A__ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__lowerCAmelCase ) def a_ ( self : int ) -> str: """simple docstring""" self.enable_attention_slicing(__lowerCAmelCase ) def a_ ( self : int ) -> Tuple: """simple docstring""" set_requires_grad(self.vae , __lowerCAmelCase ) def a_ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" set_requires_grad(self.vae , __lowerCAmelCase ) def a_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" set_requires_grad(self.unet , __lowerCAmelCase ) def a_ ( self : Any ) -> Optional[Any]: """simple docstring""" set_requires_grad(self.unet , __lowerCAmelCase ) def a_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" A__ = min(int(num_inference_steps * strength ) , __lowerCAmelCase ) A__ = max(num_inference_steps - init_timestep , 0 ) A__ = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def a_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any]=None ) -> Optional[int]: """simple docstring""" if not isinstance(__lowerCAmelCase , torch.Tensor ): raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(__lowerCAmelCase )}' ) A__ = image.to(device=__lowerCAmelCase , dtype=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): A__ = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase ) ] A__ = torch.cat(__lowerCAmelCase , dim=0 ) else: A__ = self.vae.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor A__ = 0.1_8_2_1_5 * init_latents A__ = init_latents.repeat_interleave(__lowerCAmelCase , dim=0 ) A__ = randn_tensor(init_latents.shape , generator=__lowerCAmelCase , device=__lowerCAmelCase , dtype=__lowerCAmelCase ) # get latents A__ = self.scheduler.add_noise(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = init_latents return latents def a_ ( self : Tuple , __lowerCAmelCase : Union[str, Any] ) -> List[str]: """simple docstring""" A__ = self.coca_transform(__lowerCAmelCase ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): A__ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) A__ = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" ) def a_ ( self : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ = self.feature_extractor.preprocess(__lowerCAmelCase ) A__ = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half() A__ = self.clip_model.get_image_features(__lowerCAmelCase ) A__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__lowerCAmelCase ) A__ = image_embeddings_clip.repeat_interleave(__lowerCAmelCase , dim=0 ) return image_embeddings_clip @torch.enable_grad() def a_ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : int , 
__lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , ) -> Tuple: """simple docstring""" A__ = latents.detach().requires_grad_() A__ = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) # predict the noise residual A__ = self.unet(__lowerCAmelCase , __lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): A__ = self.scheduler.alphas_cumprod[timestep] A__ = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A__ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 A__ = torch.sqrt(__lowerCAmelCase ) A__ = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , __lowerCAmelCase ): A__ = self.scheduler.sigmas[index] A__ = latents - sigma * noise_pred else: raise ValueError(f'scheduler type {type(self.scheduler )} not supported' ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor A__ = 1 / 0.1_8_2_1_5 * sample A__ = self.vae.decode(__lowerCAmelCase ).sample A__ = (image / 2 + 0.5).clamp(0 , 1 ) A__ = transforms.Resize(self.feature_extractor_size )(__lowerCAmelCase ) A__ = self.normalize(__lowerCAmelCase ).to(latents.dtype ) A__ = self.clip_model.get_image_features(__lowerCAmelCase ) A__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__lowerCAmelCase ) A__ = spherical_dist_loss(__lowerCAmelCase , __lowerCAmelCase ).mean() * clip_guidance_scale A__ = -torch.autograd.grad(__lowerCAmelCase , __lowerCAmelCase )[0] if isinstance(self.scheduler , __lowerCAmelCase ): A__ = latents.detach() + grads * (sigma**2) A__ = noise_pred_original else: A__ = noise_pred_original - torch.sqrt(__lowerCAmelCase ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : List[Any] , __lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , __lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[int] = 5_12 , __lowerCAmelCase : Optional[int] = 5_12 , __lowerCAmelCase : float = 0.6 , __lowerCAmelCase : Optional[int] = 50 , __lowerCAmelCase : Optional[float] = 7.5 , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : Optional[float] = 1_00 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , __lowerCAmelCase : float = 0.8 , __lowerCAmelCase : float = 0.1 , __lowerCAmelCase : float = 0.1 , ) -> Dict: """simple docstring""" if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size: raise ValueError(f'You have passed {batch_size} batch_size, but only {len(__lowerCAmelCase )} generators.' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' 
) if isinstance(__lowerCAmelCase , torch.Generator ) and batch_size > 1: A__ = [generator] + [None] * (batch_size - 1) A__ = [ ("""model""", self.coca_model is None), ("""tokenizer""", self.coca_tokenizer is None), ("""transform""", self.coca_transform is None), ] A__ = [x[0] for x in coca_is_none if x[1]] A__ = """, """.join(__lowerCAmelCase ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(__lowerCAmelCase ): raise ValueError( f'Content prompt is None and CoCa [{coca_is_none_str}] is None.' f'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' ) A__ = self.get_image_description(__lowerCAmelCase ) if style_prompt is None: if len(__lowerCAmelCase ): raise ValueError( f'Style prompt is None and CoCa [{coca_is_none_str}] is None.' f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' ) A__ = self.get_image_description(__lowerCAmelCase ) # get prompt text embeddings for content and style A__ = self.tokenizer( __lowerCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__lowerCAmelCase , return_tensors="""pt""" , ) A__ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] A__ = self.tokenizer( __lowerCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__lowerCAmelCase , return_tensors="""pt""" , ) A__ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] A__ = slerp(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # duplicate text embeddings for each generation per prompt A__ = text_embeddings.repeat_interleave(__lowerCAmelCase , dim=0 ) # set timesteps A__ = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) A__ = {} if accepts_offset: A__ = 1 self.scheduler.set_timesteps(__lowerCAmelCase , **__lowerCAmelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) A__ , A__ = self.get_timesteps(__lowerCAmelCase , __lowerCAmelCase , self.device ) A__ = timesteps[:1].repeat(__lowerCAmelCase ) # Preprocess image A__ = preprocess(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = self.prepare_latents( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , text_embeddings.dtype , self.device , __lowerCAmelCase ) A__ = preprocess(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = self.prepare_latents( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , text_embeddings.dtype , self.device , __lowerCAmelCase ) A__ = slerp(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if clip_guidance_scale > 0: A__ = self.get_clip_image_embeddings(__lowerCAmelCase , __lowerCAmelCase ) A__ = self.get_clip_image_embeddings(__lowerCAmelCase , __lowerCAmelCase ) A__ = slerp( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
A__ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A__ = content_text_input.input_ids.shape[-1] A__ = self.tokenizer([""""""] , padding="""max_length""" , max_length=__lowerCAmelCase , return_tensors="""pt""" ) A__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt A__ = uncond_embeddings.repeat_interleave(__lowerCAmelCase , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A__ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. A__ = (batch_size, self.unet.config.in_channels, height // 8, width // 8) A__ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps A__ = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device="""cpu""" , dtype=__lowerCAmelCase ).to( self.device ) else: A__ = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=__lowerCAmelCase ) else: if latents.shape != latents_shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) A__ = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A__ = {} if accepts_eta: A__ = eta # check if the scheduler accepts generator A__ = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: A__ = generator with self.progress_bar(total=__lowerCAmelCase ): for i, t in enumerate(__lowerCAmelCase ): # expand the latents if we are doing classifier free guidance A__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A__ = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) # predict the noise residual A__ = self.unet(__lowerCAmelCase , __lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ).sample # perform classifier free guidance if do_classifier_free_guidance: A__ , A__ = noise_pred.chunk(2 ) A__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: A__ = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) A__ , A__ = self.cond_fn( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) # compute the previous noisy sample x_t -> x_t-1 A__ = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor A__ = 1 / 0.1_8_2_1_5 * latents A__ = self.vae.decode(__lowerCAmelCase ).sample A__ = (image / 2 + 0.5).clamp(0 , 1 ) A__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A__ = self.numpy_to_pil(__lowerCAmelCase ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=__lowerCAmelCase , nsfw_content_detected=__lowerCAmelCase )
code_codestyle: 274

style_context:
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Any ) -> Union[str, Any]: """simple docstring""" A__ = ["""a""", """b""", """c"""] # Defaults to last layer if both are None A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""c"""] ) self.assertEqual(__lowerCAmelCase , [2] ) # Out indices set to match out features A__ , A__ = get_aligned_output_features_output_indices(["""a""", """c"""] , __lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [0, 2] ) # Out features set to match out indices A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [0, 2] , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [0, 2] ) # Out features selected from negative indices A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [-3, -1] , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [-3, -1] ) def a_ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , __lowerCAmelCase ) # Out features must be a list with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] ) # Out features must be a subset of stage names with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] ) # Out indices must be a list or tuple with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(__lowerCAmelCase , 0 , ["""a""", """b"""] ) # Out indices must be a subset of stage names with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(__lowerCAmelCase , (0, 1) , ["""a"""] ) # Out features and out indices must be the same length with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] ) # Out features should match out indices with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] ) # Out features and out indices should be in order with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] ) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" A__ = BackboneMixin() A__ = ["""a""", """b""", """c"""] A__ = ["""a""", """c"""] A__ = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly A__ = ["""a""", """b"""] self.assertEqual(backbone.out_features , ["""a""", """b"""] ) self.assertEqual(backbone.out_indices , [0, 1] ) A__ = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [-3, -1] )
style_context_codestyle: 274
label: 1

Sample 2

code:
import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput A : int = '''scheduler_config.json''' class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = 1 __lowerCamelCase : Optional[Any] = 2 __lowerCamelCase : Dict = 3 __lowerCamelCase : Any = 4 __lowerCamelCase : Union[str, Any] = 5 @dataclass class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : jnp.ndarray class A : '''simple docstring''' __lowerCamelCase : List[str] = SCHEDULER_CONFIG_NAME __lowerCamelCase : Optional[int] = ['''dtype'''] __lowerCamelCase : List[Any] = [] __lowerCamelCase : Dict = True @classmethod def a_ ( cls : List[Any] , __lowerCAmelCase : Dict[str, Any] = None , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[Any]=False , **__lowerCAmelCase : Dict , ) -> Optional[int]: """simple docstring""" A__ , A__ = cls.load_config( pretrained_model_name_or_path=__lowerCAmelCase , subfolder=__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase , **__lowerCAmelCase , ) A__ , A__ = cls.from_config(__lowerCAmelCase , return_unused_kwargs=__lowerCAmelCase , **__lowerCAmelCase ) if hasattr(__lowerCAmelCase , """create_state""" ) and getattr(__lowerCAmelCase , """has_state""" , __lowerCAmelCase ): A__ = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def a_ ( self : Dict , __lowerCAmelCase : Union[str, os.PathLike] , __lowerCAmelCase : bool = False , **__lowerCAmelCase : Union[str, Any] ) -> Any: """simple docstring""" self.save_config(save_directory=__lowerCAmelCase , push_to_hub=__lowerCAmelCase , **__lowerCAmelCase ) @property def a_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return self._get_compatibles() @classmethod def a_ ( cls : List[Any] ) -> Any: """simple docstring""" A__ = list(set([cls.__name__] + cls._compatibles ) ) A__ = importlib.import_module(__name__.split(""".""" )[0] ) A__ = [ getattr(__lowerCAmelCase , __lowerCAmelCase ) for c in compatible_classes_str if hasattr(__lowerCAmelCase , __lowerCAmelCase ) ] return compatible_classes def __lowerCamelCase ( __a :jnp.ndarray , __a :Tuple[int] ) -> jnp.ndarray: """simple docstring""" assert len(__a ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__a ) - x.ndim) ) , __a ) def __lowerCamelCase ( __a :int , __a :Optional[Any]=0.999 , __a :Any=jnp.floataa ) -> jnp.ndarray: """simple docstring""" def alpha_bar(__a :Optional[Any] ): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2 A__ = [] for i in range(__a ): A__ = i / num_diffusion_timesteps A__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(__a ) / alpha_bar(__a ) , __a ) ) return jnp.array(__a , dtype=__a ) @flax.struct.dataclass class A : '''simple docstring''' __lowerCamelCase : jnp.ndarray __lowerCamelCase : jnp.ndarray __lowerCamelCase : jnp.ndarray @classmethod def a_ ( cls : str , __lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" A__ = scheduler.config if config.trained_betas is not None: A__ = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": A__ = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
A__ = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule A__ = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' ) A__ = 1.0 - betas A__ = jnp.cumprod(__lowerCAmelCase , axis=0 ) return cls( alphas=__lowerCAmelCase , betas=__lowerCAmelCase , alphas_cumprod=__lowerCAmelCase , ) def __lowerCamelCase ( __a :CommonSchedulerState , __a :jnp.ndarray , __a :jnp.ndarray , __a :jnp.ndarray ) -> Optional[Any]: """simple docstring""" A__ = state.alphas_cumprod A__ = alphas_cumprod[timesteps] ** 0.5 A__ = sqrt_alpha_prod.flatten() A__ = broadcast_to_shape_from_left(__a , original_samples.shape ) A__ = (1 - alphas_cumprod[timesteps]) ** 0.5 A__ = sqrt_one_minus_alpha_prod.flatten() A__ = broadcast_to_shape_from_left(__a , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def __lowerCamelCase ( __a :CommonSchedulerState , __a :jnp.ndarray , __a :jnp.ndarray , __a :jnp.ndarray ) -> str: """simple docstring""" A__ , A__ = get_sqrt_alpha_prod(__a , __a , __a , __a ) A__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __lowerCamelCase ( __a :CommonSchedulerState , __a :jnp.ndarray , __a :jnp.ndarray , __a :jnp.ndarray ) -> str: """simple docstring""" A__ , A__ = get_sqrt_alpha_prod(__a , __a , __a , __a ) A__ = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
code_codestyle: 274

style_context:
from collections import deque class A : '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: """simple docstring""" A__ = process_name # process name A__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A__ = arrival_time A__ = burst_time # remaining burst time A__ = 0 # total time of the process wait in ready queue A__ = 0 # time from arrival time to completion time class A : '''simple docstring''' def __init__( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int , ) -> None: """simple docstring""" A__ = number_of_queues # time slice of queues that round robin algorithm applied A__ = time_slices # unfinished process is in this ready_queue A__ = queue # current time A__ = current_time # finished process is in this sequence queue A__ = deque() def a_ ( self : Dict ) -> list[str]: """simple docstring""" A__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a_ ( self : Tuple , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a_ ( self : Optional[Any] , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a_ ( self : Dict , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a_ ( self : int , __lowerCAmelCase : deque[Process] ) -> list[int]: """simple docstring""" return [q.burst_time for q in queue] def a_ ( self : Any , __lowerCAmelCase : Process ) -> int: """simple docstring""" process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a_ ( self : Union[str, Any] , __lowerCAmelCase : deque[Process] ) -> deque[Process]: """simple docstring""" A__ = deque() # sequence deque of finished process while len(__lowerCAmelCase ) != 0: A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__lowerCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A__ = 0 # set the process's turnaround time because it is finished A__ = self.current_time - cp.arrival_time # set the completion time A__ = self.current_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a_ ( self : Optional[Any] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int ) -> tuple[deque[Process], deque[Process]]: """simple docstring""" A__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(__lowerCAmelCase ) ): A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if 
self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__lowerCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__lowerCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A__ = 0 # set the finish time A__ = self.current_time # update the process' turnaround time because it is finished A__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a_ ( self : List[Any] ) -> deque[Process]: """simple docstring""" for i in range(self.number_of_queues - 1 ): A__ , A__ = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest A : Union[str, Any] = Process('''P1''', 0, 5_3) A : Optional[Any] = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : int = Process('''P4''', 0, 2_4) A : Any = 3 A : List[Any] = [1_7, 2_5] A : Optional[Any] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) A : Optional[Any] = Process('''P1''', 0, 5_3) A : int = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : Tuple = Process('''P4''', 0, 2_4) A : Union[str, Any] = 3 A : Optional[Any] = [1_7, 2_5] A : Tuple = deque([Pa, Pa, Pa, Pa]) A : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0) A : Dict = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( F'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( F'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
style_context_codestyle: 274
label: 1

Sample 3

code:
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = tempfile.mkdtemp() # fmt: off A__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on A__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) A__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] A__ = {"""unk_token""": """<unk>"""} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__lowerCAmelCase ) ) A__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } A__ = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Tuple , **__lowerCAmelCase : Dict ) -> str: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : Union[str, Any] , **__lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : List[str] , **__lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : str ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a_ ( self : str ) -> Any: """simple docstring""" A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a_ ( self : Optional[int] ) -> Tuple: """simple docstring""" A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = self.get_image_processor() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) 
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) A__ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def a_ ( self : List[Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = self.prepare_image_inputs() A__ = image_processor(__lowerCAmelCase , return_tensors="""np""" ) A__ = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a_ ( self : Optional[Any] ) -> Any: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = processor(text=__lowerCAmelCase ) A__ = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def a_ ( self : Tuple ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.batch_decode(__lowerCAmelCase ) A__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Optional[int] ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = 
processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
code_codestyle: 274

style_context:
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType A : str = logging.get_logger(__name__) A : Union[str, Any] = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Optional[Any] = '''layoutlmv3''' def __init__( self : Tuple , __lowerCAmelCase : Optional[int]=5_02_65 , __lowerCAmelCase : Tuple=7_68 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : List[Any]=30_72 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=5_12 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Tuple=10_24 , __lowerCAmelCase : List[str]=1_28 , __lowerCAmelCase : Optional[int]=1_28 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Any=1_28 , __lowerCAmelCase : str=64 , __lowerCAmelCase : Optional[int]=2_56 , __lowerCAmelCase : int=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=2_24 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : Optional[Any] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = max_ad_position_embeddings A__ = coordinate_size A__ = shape_size A__ = has_relative_attention_bias A__ = rel_pos_bins A__ = max_rel_pos A__ = has_spatial_attention_bias A__ = rel_ad_pos_bins A__ = max_rel_ad_pos A__ = text_embed A__ = visual_embed A__ = input_size A__ = num_channels A__ = patch_size A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = version.parse('''1.12''' ) @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def a_ ( self : Optional[int] ) -> 
float: """simple docstring""" return 1e-5 @property def a_ ( self : Tuple ) -> int: """simple docstring""" return 12 def a_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A__ = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence A__ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes A__ = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
style_context_codestyle: 274
label: 1

Sample 4

code:
from ...configuration_utils import PretrainedConfig from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : int = { '''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''', '''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''', } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Tuple = '''falcon''' __lowerCamelCase : List[str] = ['''past_key_values'''] def __init__( self : Tuple , __lowerCAmelCase : Optional[Any]=6_50_24 , __lowerCAmelCase : Tuple=45_44 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : List[str]=71 , __lowerCAmelCase : Tuple=1e-5 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : str=None , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : int=False , __lowerCAmelCase : Tuple=11 , __lowerCAmelCase : int=11 , **__lowerCAmelCase : List[Any] , ) -> Union[str, Any]: """simple docstring""" A__ = vocab_size # Backward compatibility with n_embed kwarg A__ = kwargs.pop("""n_embed""" , __lowerCAmelCase ) A__ = hidden_size if n_embed is None else n_embed A__ = num_hidden_layers A__ = num_attention_heads A__ = layer_norm_epsilon A__ = initializer_range A__ = use_cache A__ = hidden_dropout A__ = attention_dropout A__ = bos_token_id A__ = eos_token_id A__ = num_attention_heads if num_kv_heads is None else num_kv_heads A__ = alibi A__ = new_decoder_architecture A__ = multi_query # Ignored when new_decoder_architecture is True A__ = parallel_attn A__ = bias super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @property def a_ ( self : Union[str, Any] ) -> int: """simple docstring""" return self.hidden_size // self.num_attention_heads @property def a_ ( self : List[str] ) -> Optional[int]: """simple docstring""" return not self.alibi
code_codestyle: 274

style_context:
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : str = '''▁''' A : Any = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } A : List[Any] = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } A : Tuple = { '''facebook/m2m100_418M''': 1_0_2_4, } # fmt: off A : Optional[int] = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = ['''input_ids''', '''attention_mask'''] __lowerCamelCase : List[int] = [] __lowerCamelCase : List[int] = [] def __init__( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]="<s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : Any="<unk>" , __lowerCAmelCase : Any="m2m100" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , __lowerCAmelCase : Dict=8 , **__lowerCAmelCase : Tuple , ) -> None: """simple docstring""" A__ = {} if sp_model_kwargs is None else sp_model_kwargs A__ = language_codes A__ = FAIRSEQ_LANGUAGE_CODES[language_codes] A__ = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code} A__ 
= kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__lowerCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = vocab_file A__ = load_json(__lowerCAmelCase ) A__ = {v: k for k, v in self.encoder.items()} A__ = spm_file A__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs ) A__ = len(self.encoder ) A__ = { self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase ) } A__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )} A__ = {v: k for k, v in self.lang_token_to_id.items()} A__ = src_lang if src_lang is not None else """en""" A__ = tgt_lang A__ = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) A__ = num_madeup_words @property def a_ ( self : Optional[int] ) -> int: """simple docstring""" return len(self.encoder ) + len(self.lang_token_to_id ) @property def a_ ( self : Optional[Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def a_ ( self : List[Any] , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a_ ( self : Optional[int] , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def a_ ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] ) def a_ ( self : Optional[int] , __lowerCAmelCase : int ) -> str: """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__lowerCAmelCase , self.unk_token ) def a_ ( self : Optional[int] , __lowerCAmelCase : Dict ) -> str: """simple docstring""" A__ = [] A__ = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token A__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def a_ ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) A__ = [1] * len(self.prefix_tokens ) A__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones def a_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + 
self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self : int ) -> Dict: """simple docstring""" A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : str , __lowerCAmelCase : Dict ) -> None: """simple docstring""" A__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A__ = {} A__ = load_spm(self.spm_file , self.sp_model_kwargs ) def a_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" A__ = Path(__lowerCAmelCase ) if not save_dir.is_dir(): raise OSError(f'{save_directory} should be a directory' ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __lowerCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCAmelCase , """wb""" ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (str(__lowerCAmelCase ), str(__lowerCAmelCase )) def a_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "en" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "ro" , **__lowerCAmelCase : List[Any] , ) -> BatchEncoding: """simple docstring""" A__ = src_lang A__ = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : Tuple ) -> Tuple: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) A__ = src_lang A__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase ) A__ = self.get_lang_id(__lowerCAmelCase ) A__ = tgt_lang_id return inputs def a_ ( self : Dict ) -> int: """simple docstring""" self.set_src_lang_special_tokens(self.src_lang ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang ) def a_ ( self : str , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Tuple , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> str: """simple docstring""" return self.lang_code_to_token[lang] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> int: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) return self.lang_token_to_id[lang_token] def __lowerCamelCase ( __a :str , 
__a :Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" A__ = sentencepiece.SentencePieceProcessor(**__a ) spm.Load(str(__a ) ) return spm def __lowerCamelCase ( __a :str ) -> Union[Dict, List]: """simple docstring""" with open(__a , """r""" ) as f: return json.load(__a ) def __lowerCamelCase ( __a :List[Any] , __a :str ) -> None: """simple docstring""" with open(__a , """w""" ) as f: json.dump(__a , __a , indent=2 )
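# Hedged usage sketch (added for illustration; the helper name below is an
# assumption, not part of the file above). Tokenizers of this shape frame a
# source sequence as [lang_token_id] + token_ids + [eos_token_id], which is
# exactly what set_src_lang_special_tokens configures via prefix_tokens and
# suffix_tokens:
def demo_wrap_with_lang_token(token_ids, lang_token_id, eos_token_id):
    # mirrors build_inputs_with_special_tokens for the single-sequence case
    return [lang_token_id] + token_ids + [eos_token_id]


assert demo_wrap_with_lang_token([10, 11], lang_token_id=9, eos_token_id=2) == [9, 10, 11, 2]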
from __future__ import annotations import math def __lowerCamelCase ( __a :int , __a :int , __a :bool , __a :list[int] , __a :float ) -> int: """simple docstring""" if depth < 0: raise ValueError("""Depth cannot be less than 0""" ) if len(__a ) == 0: raise ValueError("""Scores cannot be empty""" ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , __a , __a , __a ) , minimax(depth + 1 , node_index * 2 + 1 , __a , __a , __a ) , ) return min( minimax(depth + 1 , node_index * 2 , __a , __a , __a ) , minimax(depth + 1 , node_index * 2 + 1 , __a , __a , __a ) , ) def __lowerCamelCase ( ) -> None: """simple docstring""" A__ = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3] A__ = math.log(len(__a ) , 2 ) print("""Optimal value : """ , end="""""" ) print(minimax(0 , 0 , __a , __a , __a ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
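# Hedged worked example (standalone; the demo names are assumptions). The
# recursion above evaluates a complete binary game tree; for the four leaves
# [3, 5, 2, 9] with a maximizer at the root the optimal value is
# max(min(3, 5), min(2, 9)) = 3.
def demo_minimax(depth, node_index, is_max, scores, height):
    if depth == height:
        return scores[node_index]
    children = (
        demo_minimax(depth + 1, node_index * 2, not is_max, scores, height),
        demo_minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
    )
    return max(children) if is_max else min(children)


assert demo_minimax(0, 0, True, [3, 5, 2, 9], 2) == 3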
from __future__ import annotations from PIL import Image # Define glider example A : Any = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example A : Optional[Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def __lowerCamelCase ( __a :list[list[int]] ) -> list[list[int]]: """simple docstring""" A__ = [] for i in range(len(__a ) ): A__ = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours A__ = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(__a ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(__a ) - 1: neighbour_count += cells[i + 1][j] if i < len(__a ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. A__ = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(__a ) return next_generation def __lowerCamelCase ( __a :list[list[int]] , __a :int ) -> list[Image.Image]: """simple docstring""" A__ = [] for _ in range(__a ): # Create output image A__ = Image.new("""RGB""" , (len(cells[0] ), len(__a )) ) A__ = img.load() # Save cells to image for x in range(len(__a ) ): for y in range(len(cells[0] ) ): A__ = 2_5_5 - cells[y][x] * 2_5_5 A__ = (colour, colour, colour) # Save image images.append(__a ) A__ = new_generation(__a ) return images if __name__ == "__main__": A : str = generate_images(GLIDER, 1_6) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
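# Hedged sketch (standalone helper; the name is an assumption): the update rule
# applied inside the nested loops above, written as a pure function of one cell.
def demo_next_cell_state(alive: bool, live_neighbours: int) -> int:
    # a live cell survives with 2 or 3 neighbours; a dead cell is born with exactly 3
    if (alive and 2 <= live_neighbours <= 3) or (not alive and live_neighbours == 3):
        return 1
    return 0


assert demo_next_cell_state(True, 1) == 0   # underpopulation
assert demo_next_cell_state(True, 2) == 1   # survival
assert demo_next_cell_state(False, 3) == 1  # reproduction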
from ...configuration_utils import PretrainedConfig from ...utils import logging A : Tuple = logging.get_logger(__name__) A : Dict = { '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''', # See all CANINE models at https://huggingface.co/models?filter=canine } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = '''canine''' def __init__( self : Dict , __lowerCAmelCase : int=7_68 , __lowerCAmelCase : Optional[int]=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : int=30_72 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Optional[int]=1_63_84 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : List[Any]=1e-12 , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : Optional[Any]=0XE000 , __lowerCAmelCase : int=0XE001 , __lowerCAmelCase : List[Any]=4 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_63_84 , __lowerCAmelCase : List[Any]=1_28 , **__lowerCAmelCase : int , ) -> List[str]: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) A__ = max_position_embeddings A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = initializer_range A__ = type_vocab_size A__ = layer_norm_eps # Character config: A__ = downsampling_rate A__ = upsampling_kernel_size A__ = num_hash_functions A__ = num_hash_buckets A__ = local_transformer_stride
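# Hedged usage sketch: if the class above mirrors transformers' CanineConfig,
# it is used like any PretrainedConfig — defaults can be overridden by keyword
# (assumption: a transformers package with CANINE support is installed).
from transformers import CanineConfig

demo_config = CanineConfig(num_hidden_layers=6)  # override one default
print(demo_config.hidden_size)  # 768 by default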
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # fixed: the BeautifulSoup parser lives in the bs4 package

if __name__ == "__main__":
    A : List[str] = input('''Enter image url: ''').strip()
    print(F'''Downloading image from {url} ...''')
    A : Any = BeautifulSoup(requests.get(url).content, '''html.parser''')
    # The image URL is in the content field of the first meta tag with property og:image
    A : List[Any] = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
    A : Dict = requests.get(image_url).content
    A : Tuple = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
    with open(file_name, '''wb''') as fp:
        fp.write(image_data)
    print(F'''Done. Image saved to disk as {file_name}.''')
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) A : List[Any] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : str = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __lowerCamelCase ( __a :List[str] , __a :List[Any] , __a :Union[str, Any] , __a :List[Any] ) -> Dict: """simple docstring""" A__ = multiprocessing.Manager() A__ = manager.list() A__ = multiprocessing.Process(target=__a , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __lowerCamelCase ( __a :Optional[Any] , __a :Any , __a :List[Any] ) -> Union[str, Any]: """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil A__ = shutil.rmtree A__ = os.rmdir A__ = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: A__ = {} with swallow_io(): with time_limit(__a ): exec(__a , __a ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(F'failed: {e}' ) # Needed for cleaning up. A__ = rmtree A__ = rmdir A__ = chdir @contextlib.contextmanager def __lowerCamelCase ( __a :List[str] ) -> Dict: """simple docstring""" def signal_handler(__a :List[Any] , __a :Optional[Any] ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , __a ) signal.signal(signal.SIGALRM , __a ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = WriteOnlyStringIO() with contextlib.redirect_stdout(__a ): with contextlib.redirect_stderr(__a ): with redirect_stdin(__a ): yield @contextlib.contextmanager def __lowerCamelCase ( ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(__a ): yield dirname class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A (io.StringIO ): '''simple docstring''' def a_ ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ) -> Dict: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[int] ) -> str: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Any ) -> int: """simple docstring""" raise OSError def a_ ( self : str , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> int: """simple docstring""" return False class A (contextlib._RedirectStream ): # type: ignore '''simple docstring''' __lowerCamelCase : Union[str, Any] = '''stdin''' @contextlib.contextmanager def __lowerCamelCase ( __a :Union[str, Any] ) -> List[str]: """simple docstring""" if root == ".": yield return A__ = os.getcwd() os.chdir(__a ) try: yield except BaseException as exc: raise exc finally: os.chdir(__a ) def __lowerCamelCase ( __a :Union[str, Any]=None ) -> Dict: """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , 
(maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins A__ = None A__ = None import os A__ = """1""" A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None import shutil A__ = None A__ = None A__ = None import subprocess A__ = None # type: ignore A__ = None import sys A__ = None A__ = None A__ = None A__ = None A__ = None
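# Hedged sketch of the SIGALRM timeout pattern used above (standalone, assumed
# names; POSIX-only, like the original):
import signal
from contextlib import contextmanager


class DemoTimeout(Exception):
    pass


@contextmanager
def demo_time_limit(seconds: float):
    def handler(signum, frame):
        raise DemoTimeout("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


# Usage: a sleep longer than the limit raises DemoTimeout.
# with demo_time_limit(0.1):
#     time.sleep(1)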
from ...configuration_utils import PretrainedConfig from ...utils import logging A : List[str] = logging.get_logger(__name__) A : List[Any] = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Any = '''gpt_bigcode''' __lowerCamelCase : List[Any] = ['''past_key_values'''] __lowerCamelCase : List[Any] = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : str , __lowerCAmelCase : List[str]=5_02_57 , __lowerCAmelCase : int=10_24 , __lowerCAmelCase : List[str]=7_68 , __lowerCAmelCase : int=12 , __lowerCAmelCase : Tuple=12 , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Optional[int]="gelu_pytorch_tanh" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Dict=1e-5 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[Any]=5_02_56 , __lowerCAmelCase : Any=5_02_56 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=True , **__lowerCAmelCase : str , ) -> Any: """simple docstring""" A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = n_inner A__ = activation_function A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = scale_attn_weights A__ = use_cache A__ = attention_softmax_in_fpaa A__ = scale_attention_softmax_in_fpaa A__ = multi_query A__ = bos_token_id A__ = eos_token_id super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: A : Tuple = None A : Optional[Any] = logging.get_logger(__name__) A : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A : List[str] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } A : List[str] = { '''albert-base-v1''': 5_1_2, '''albert-large-v1''': 5_1_2, '''albert-xlarge-v1''': 5_1_2, '''albert-xxlarge-v1''': 5_1_2, '''albert-base-v2''': 5_1_2, '''albert-large-v2''': 5_1_2, '''albert-xlarge-v2''': 5_1_2, '''albert-xxlarge-v2''': 5_1_2, } A : Optional[int] = '''▁''' class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : str = VOCAB_FILES_NAMES __lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : List[str] = AlbertTokenizer def __init__( self : Tuple , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : str=False , __lowerCAmelCase : Union[str, Any]="[CLS]" , __lowerCAmelCase : int="[SEP]" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Dict="[SEP]" , __lowerCAmelCase : Union[str, Any]="<pad>" , __lowerCAmelCase : str="[CLS]" , __lowerCAmelCase : int="[MASK]" , **__lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" A__ = ( AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token ) super().__init__( __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , 
keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def a_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ): copyfile(self.vocab_file , __lowerCAmelCase ) return (out_vocab_file,)
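# Hedged worked example (standalone; the helper is an assumption): the two
# methods above implement the [CLS] A [SEP] (B [SEP]) layout and its segment
# ids, which the sketch below reproduces for small inputs.
def demo_layout(a, b=None, cls=2, sep=3):
    if b is None:
        return [cls] + a + [sep], [0] * (len(a) + 2)
    ids = [cls] + a + [sep] + b + [sep]
    return ids, [0] * (len(a) + 2) + [1] * (len(b) + 1)


assert demo_layout([5, 6]) == ([2, 5, 6, 3], [0, 0, 0, 0])
assert demo_layout([5, 6], [7]) == ([2, 5, 6, 3, 7, 3], [0, 0, 0, 0, 1, 1])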
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __lowerCamelCase ( ) -> Tuple: """simple docstring""" A__ = HfArgumentParser(__a ) A__ = parser.parse_args_into_dataclasses()[0] A__ = TensorFlowBenchmark(args=__a ) try: A__ = parser.parse_args_into_dataclasses()[0] except ValueError as e: A__ = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" A__ = """ """.join(str(__a ).split(""" """ )[:-1] ) A__ = """""" A__ = eval(str(__a ).split(""" """ )[-1] ) A__ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__a ) if len(__a ) > 0: A__ = full_error_msg + begin_error_msg + str(__a ) raise ValueError(__a ) benchmark.run() if __name__ == "__main__": main()
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A : Dict = logging.get_logger(__name__) def __lowerCamelCase ( __a :int=None , __a :Optional[Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__a ) @dataclass class A : '''simple docstring''' __lowerCamelCase : List[str] = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) __lowerCamelCase : List[int] = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) __lowerCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) __lowerCamelCase : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) __lowerCamelCase : str = field( default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) __lowerCamelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" warnings.warn( f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , __lowerCAmelCase , ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def a_ ( self : Tuple ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def a_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
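# Hedged sketch (standalone; assumed names): list_field above exists because a
# bare mutable default is illegal in a dataclass — wrapping it in a
# default_factory defers evaluation until instantiation.
import dataclasses


def demo_list_field(default=None):
    return dataclasses.field(default_factory=lambda: default)


@dataclasses.dataclass
class DemoArgs:
    batch_sizes: list = demo_list_field([8])


assert DemoArgs().batch_sizes == [8]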
def __lowerCamelCase ( __a :float , __a :float ) -> float: """simple docstring""" if mass < 0: raise ValueError("""The mass of a body cannot be negative""" ) return 0.5 * mass * abs(__a ) * abs(__a ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
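# Worked check (illustrative): a 10 kg mass at 5 m/s carries
# 0.5 * 10 * 5**2 = 125 J, and the abs() calls above make the result
# independent of the sign of the velocity.
assert 0.5 * 10 * abs(5) * abs(5) == 125.0
assert 0.5 * 10 * abs(-5) * abs(-5) == 125.0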
from math import ceil def __lowerCamelCase ( __a :int = 1_0_0_1 ) -> int: """simple docstring""" A__ = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): A__ = 2 * i + 1 A__ = 2 * i A__ = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: A : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number''')
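# Worked check (illustrative): for a 5x5 number spiral the diagonal values are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, summing to 101. The loop above adds
# 4 * odd**2 - 6 * even per ring (odd = 2i + 1, even = 2i): starting from 1,
# i = 1 gives 1 + 36 - 12 = 25 (the 3x3 total) and i = 2 gives 25 + 100 - 24 = 101.
assert 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 == 101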
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : int = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Any = '''xlm-roberta''' def __init__( self : Optional[Any] , __lowerCAmelCase : List[Any]=3_05_22 , __lowerCAmelCase : int=7_68 , __lowerCAmelCase : Tuple=12 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Union[str, Any]=30_72 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-12 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Tuple="absolute" , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=None , **__lowerCAmelCase : str , ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A__ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: A__ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) A : Tuple = logging.getLogger(__name__) def __lowerCamelCase ( __a :Optional[int] , __a :List[str] ) -> Tuple: """simple docstring""" A__ = np.argmax(__a , axis=1 ) return np.sum(outputs == labels ) def __lowerCamelCase ( __a :Tuple ) -> Dict: """simple docstring""" with open(__a , encoding="""utf_8""" ) as f: A__ = csv.reader(__a ) A__ = [] next(__a ) # skip the first line for line in tqdm(__a ): output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def __lowerCamelCase ( __a :Optional[int] , __a :List[Any] , __a :Dict , __a :Optional[Any] , __a :Optional[Any] , __a :int ) -> Union[str, Any]: """simple docstring""" A__ = [] for dataset in encoded_datasets: A__ = len(__a ) A__ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) A__ = np.zeros((n_batch, 2) , dtype=np.intaa ) A__ = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa ) A__ = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(__a ): A__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] A__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] A__ = with_conta A__ = with_conta A__ = len(__a ) - 1 A__ = len(__a ) - 1 A__ = with_conta A__ = with_conta A__ = mc_label A__ = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(__a ) for t in all_inputs ) ) return tensor_datasets def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = argparse.ArgumentParser() parser.add_argument("""--model_name""" , type=__a , default="""openai-gpt""" , help="""pretrained model name""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" ) parser.add_argument( """--output_dir""" , default=__a , type=__a , required=__a , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument("""--train_dataset""" , type=__a , default="""""" ) parser.add_argument("""--eval_dataset""" , type=__a , default="""""" ) parser.add_argument("""--seed""" , type=__a , default=4_2 ) parser.add_argument("""--num_train_epochs""" , type=__a , default=3 ) parser.add_argument("""--train_batch_size""" , type=__a , default=8 ) parser.add_argument("""--eval_batch_size""" , type=__a , default=1_6 ) parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__a , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , type=__a , default=1 ) parser.add_argument( """--max_steps""" , default=-1 , type=__a , help=( """If > 0: set total number of training steps to perform. 
Override num_train_epochs.""" ) , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__a , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--learning_rate""" , type=__a , default=6.25E-5 ) parser.add_argument("""--warmup_steps""" , default=0 , type=__a , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--lr_schedule""" , type=__a , default="""warmup_linear""" ) parser.add_argument("""--weight_decay""" , type=__a , default=0.01 ) parser.add_argument("""--lm_coef""" , type=__a , default=0.9 ) parser.add_argument("""--n_valid""" , type=__a , default=3_7_4 ) parser.add_argument("""--server_ip""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" ) A__ = parser.parse_args() print(__a ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) A__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A__ = torch.cuda.device_count() logger.info("""device: {}, n_gpu {}""".format(__a , __a ) ) if not args.do_train and not args.do_eval: raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset A__ = ["""_start_""", """_delimiter_""", """_classify_"""] A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(__a ) A__ = tokenizer.convert_tokens_to_ids(__a ) A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(__a ) ) model.to(__a ) # Load and encode the datasets def tokenize_and_encode(__a :Tuple ): if isinstance(__a , __a ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__a ) ) elif isinstance(__a , __a ): return obj return [tokenize_and_encode(__a ) for o in obj] logger.info("""Encoding dataset...""" ) A__ = load_rocstories_dataset(args.train_dataset ) A__ = load_rocstories_dataset(args.eval_dataset ) A__ = (train_dataset, eval_dataset) A__ = tokenize_and_encode(__a ) # Compute the max input length for the Transformer A__ = model.config.n_positions // 2 - 2 A__ = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) A__ = min(__a , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders A__ = pre_process_datasets(__a , __a , __a , *__a ) A__ , A__ = tensor_datasets[0], tensor_datasets[1] A__ = TensorDataset(*__a ) A__ = RandomSampler(__a ) A__ = DataLoader(__a , sampler=__a , batch_size=args.train_batch_size ) A__ = TensorDataset(*__a ) A__ = SequentialSampler(__a ) A__ = DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: A__ = args.max_steps A__ = args.max_steps // (len(__a 
) // args.gradient_accumulation_steps) + 1 else: A__ = len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs A__ = list(model.named_parameters() ) A__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""] A__ = [ { """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], """weight_decay""": args.weight_decay, }, {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0}, ] A__ = AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon ) A__ = get_linear_schedule_with_warmup( __a , num_warmup_steps=args.warmup_steps , num_training_steps=__a ) if args.do_train: A__ , A__ , A__ = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ): A__ = 0 A__ = 0 A__ = tqdm(__a , desc="""Training""" ) for step, batch in enumerate(__a ): A__ = tuple(t.to(__a ) for t in batch ) A__ , A__ , A__ , A__ = batch A__ = model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a ) A__ = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() A__ = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 A__ = """Training loss: {:.2e} lr: {:.2e}""".format(__a , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer A__ = model.module if hasattr(__a , """module""" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` A__ = os.path.join(args.output_dir , __a ) A__ = os.path.join(args.output_dir , __a ) torch.save(model_to_save.state_dict() , __a ) model_to_save.config.to_json_file(__a ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) A__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(__a ) if args.do_eval: model.eval() A__ , A__ = 0, 0 A__ , A__ = 0, 0 for batch in tqdm(__a , desc="""Evaluating""" ): A__ = tuple(t.to(__a ) for t in batch ) A__ , A__ , A__ , A__ = batch with torch.no_grad(): A__ , A__ , A__ , A__ = model( __a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a ) A__ = mc_logits.detach().cpu().numpy() A__ = mc_labels.to("""cpu""" ).numpy() A__ = accuracy(__a , __a ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 A__ = eval_loss / nb_eval_steps A__ = eval_accuracy / nb_eval_examples A__ = tr_loss / nb_tr_steps if args.do_train else None A__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss} A__ = os.path.join(args.output_dir , """eval_results.txt""" ) with open(__a , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , __a , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) if __name__ == "__main__": main()
def __lowerCamelCase ( __a :Optional[Any] ) -> int: """simple docstring""" A__ = 0 A__ = len(__a ) for i in range(n - 1 ): for j in range(i + 1 , __a ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def __lowerCamelCase ( __a :int ) -> Any: """simple docstring""" if len(__a ) <= 1: return arr, 0 A__ = len(__a ) // 2 A__ = arr[0:mid] A__ = arr[mid:] A__ , A__ = count_inversions_recursive(__a ) A__ , A__ = count_inversions_recursive(__a ) A__ , A__ = _count_cross_inversions(__a , __a ) A__ = inversion_p + inversions_q + cross_inversions return c, num_inversions def __lowerCamelCase ( __a :List[str] , __a :Any ) -> Any: """simple docstring""" A__ = [] A__ = A__ = A__ = 0 while i < len(__a ) and j < len(__a ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__a ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__a ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = [1_0, 2, 1, 5, 5, 2, 1_1] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) A__ = count_inversions_bf(__a ) A__ , A__ = count_inversions_recursive(__a ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __a ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() A__ = count_inversions_bf(__a ) A__ , A__ = count_inversions_recursive(__a ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __a ) # an empty list should also have zero inversions A__ = [] A__ = count_inversions_bf(__a ) A__ , A__ = count_inversions_recursive(__a ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __a ) if __name__ == "__main__": main()
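# Hedged sketch (standalone; assumed name): merging two sorted halves counts
# all cross inversions in O(n), which is what makes the recursive version
# O(n log n) versus the O(n^2) brute force above it.
def demo_count_cross(p, q):
    r, i, j, inv = [], 0, 0, 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            inv += len(p) - i  # every remaining element of p beats q[j]
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    r.extend(p[i:])
    r.extend(q[j:])
    return r, inv


assert demo_count_cross([1, 5, 10], [2, 11]) == ([1, 2, 5, 10, 11], 2)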
import argparse from collections import defaultdict import yaml A : str = '''docs/source/en/_toctree.yml''' def __lowerCamelCase ( __a :str ) -> List[Any]: """simple docstring""" A__ = defaultdict(__a ) A__ = [] A__ = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(__a ) A__ = new_doc_list A__ = [key for key, value in counts.items() if value > 1] A__ = [] for duplicate_key in duplicates: A__ = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(__a ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) A__ = sorted(__a , key=lambda __a : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__a ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(__a ) # Sort return overview_doc def __lowerCamelCase ( __a :Any=False ) -> List[str]: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 A__ = api_doc[scheduler_idx]["""sections"""] A__ = clean_doc_toc(__a ) A__ = False if new_scheduler_doc != scheduler_doc: A__ = True if overwrite: A__ = new_scheduler_doc if diff: if overwrite: A__ = api_doc with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def __lowerCamelCase ( __a :Optional[int]=False ) -> Dict: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 A__ = False A__ = api_doc[pipeline_idx]["""sections"""] A__ = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: A__ = pipeline_doc["""section"""] A__ = clean_doc_toc(__a ) if overwrite: A__ = new_sub_pipeline_doc new_pipeline_docs.append(__a ) # sort overall pipeline doc A__ = clean_doc_toc(__a ) if new_pipeline_docs != pipeline_docs: A__ = True if overwrite: A__ = new_pipeline_docs if diff: if overwrite: A__ = api_doc with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') A : Optional[Any] = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) 
check_pipeline_doc(args.fix_and_overwrite)
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class A : '''simple docstring''' def __init__( self : Dict , __lowerCAmelCase : str = "cpu" , __lowerCAmelCase : str = "openai/clip-vit-large-patch14" ) -> None: """simple docstring""" A__ = device A__ = CLIPTokenizerFast.from_pretrained(__lowerCAmelCase ) A__ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] A__ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] A__ = torchvision.transforms.Normalize(self.image_mean , self.image_std ) A__ = torchvision.transforms.Resize(2_24 ) A__ = torchvision.transforms.CenterCrop(2_24 ) def a_ ( self : Dict , __lowerCAmelCase : Optional[int] ) -> Optional[int]: """simple docstring""" A__ = self.resize(__lowerCAmelCase ) A__ = self.center_crop(__lowerCAmelCase ) A__ = self.normalize(__lowerCAmelCase ) return images def __call__( self : Dict , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Dict ) -> Optional[int]: """simple docstring""" A__ = self.tokenizer(text=__lowerCAmelCase , **__lowerCAmelCase ) A__ = self.preprocess_img(__lowerCAmelCase ) A__ = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class A (nn.Module ): '''simple docstring''' def __init__( self : Dict , __lowerCAmelCase : Any=10 , __lowerCAmelCase : Union[str, Any]=0.0_1 , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : List[Any]="image" , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : int=False , __lowerCAmelCase : int=False , ) -> None: """simple docstring""" super().__init__() A__ = None A__ = device if device else get_device() if vqgan: A__ = vqgan else: A__ = load_vqgan(self.device , conf_path=__lowerCAmelCase , ckpt_path=__lowerCAmelCase ) self.vqgan.eval() if clip: A__ = clip else: A__ = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" ) self.clip.to(self.device ) A__ = ProcessorGradientFlow(device=self.device ) A__ = iterations A__ = lr A__ = log A__ = make_grid A__ = return_val A__ = quantize A__ = self.vqgan.decoder.z_shape def a_ ( self : List[Any] , __lowerCAmelCase : int=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[Any]=5 , __lowerCAmelCase : Optional[int]=True ) -> Optional[int]: """simple docstring""" A__ = [] if output_path is None: A__ = """./animation.gif""" if input_path is None: A__ = self.save_path A__ = sorted(glob(input_path + """/*""" ) ) if not len(__lowerCAmelCase ): raise ValueError( """No images found in save path, aborting (did you pass save_intermediate=True to the generate""" """ function?)""" ) if len(__lowerCAmelCase ) == 1: print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" ) A__ = total_duration / len(__lowerCAmelCase ) A__ = [frame_duration] * len(__lowerCAmelCase ) if extend_frames: A__ = 1.5 A__ = 3 for file_name in paths: if file_name.endswith(""".png""" ): images.append(imageio.imread(__lowerCAmelCase ) ) 
imageio.mimsave(__lowerCAmelCase , __lowerCAmelCase , duration=__lowerCAmelCase ) print(f'gif saved to {output_path}' ) def a_ ( self : int , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Any=None ) -> List[Any]: """simple docstring""" if not (path or img): raise ValueError("""Input either path or tensor""" ) if img is not None: raise NotImplementedError A__ = preprocess(Image.open(__lowerCAmelCase ) , target_image_size=2_56 ).to(self.device ) A__ = preprocess_vqgan(__lowerCAmelCase ) A__ , *A__ = self.vqgan.encode(__lowerCAmelCase ) return z def a_ ( self : Optional[Any] , __lowerCAmelCase : Any ) -> Tuple: """simple docstring""" A__ = self.latent.detach().requires_grad_() A__ = base_latent + transform_vector if self.quantize: A__ , *A__ = self.vqgan.quantize(__lowerCAmelCase ) else: A__ = trans_latent return self.vqgan.decode(__lowerCAmelCase ) def a_ ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]=None ) -> Optional[Any]: """simple docstring""" A__ = self.clip_preprocessor(text=__lowerCAmelCase , images=__lowerCAmelCase , return_tensors="""pt""" , padding=__lowerCAmelCase ) A__ = self.clip(**__lowerCAmelCase ) A__ = clip_outputs.logits_per_image if weights is not None: A__ = similarity_logits * weights return similarity_logits.sum() def a_ ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ = self._get_clip_similarity(pos_prompts["""prompts"""] , __lowerCAmelCase , weights=(1 / pos_prompts["""weights"""]) ) if neg_prompts: A__ = self._get_clip_similarity(neg_prompts["""prompts"""] , __lowerCAmelCase , weights=neg_prompts["""weights"""] ) else: A__ = torch.tensor([1] , device=self.device ) A__ = -torch.log(__lowerCAmelCase ) + torch.log(__lowerCAmelCase ) return loss def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str: """simple docstring""" A__ = torch.randn_like(self.latent , requires_grad=__lowerCAmelCase , device=self.device ) A__ = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() A__ = self._add_vector(__lowerCAmelCase ) A__ = loop_post_process(__lowerCAmelCase ) A__ = self._get_CLIP_loss(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) print("""CLIP loss""" , __lowerCAmelCase ) if self.log: wandb.log({"""CLIP Loss""": clip_loss} ) clip_loss.backward(retain_graph=__lowerCAmelCase ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def a_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str ) -> Dict: """simple docstring""" wandb.init(reinit=__lowerCAmelCase , project="""face-editor""" ) wandb.config.update({"""Positive Prompts""": positive_prompts} ) wandb.config.update({"""Negative Prompts""": negative_prompts} ) wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} ) if image_path: A__ = Image.open(__lowerCAmelCase ) A__ = image.resize((2_56, 2_56) ) wandb.log("""Original Image""" , wandb.Image(__lowerCAmelCase ) ) def a_ ( self : List[Any] , __lowerCAmelCase : Tuple ) -> Optional[int]: """simple docstring""" if not prompts: return [] A__ = [] A__ = [] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): A__ = [prompt.strip() for prompt in prompts.split("""|""" )] for prompt in prompts: if isinstance(__lowerCAmelCase , (tuple, list) ): A__ = prompt[0] A__ = 
float(prompt[1] ) elif ":" in prompt: A__ , A__ = prompt.split(""":""" ) A__ = float(__lowerCAmelCase ) else: A__ = prompt A__ = 1.0 processed_prompts.append(__lowerCAmelCase ) weights.append(__lowerCAmelCase ) return { "prompts": processed_prompts, "weights": torch.tensor(__lowerCAmelCase , device=self.device ), } def a_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Dict=None , ) -> Dict: """simple docstring""" if image_path: A__ = self._get_latent(__lowerCAmelCase ) else: A__ = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) assert pos_prompts, "You must provide at least one positive prompt." A__ = self.process_prompts(__lowerCAmelCase ) A__ = self.process_prompts(__lowerCAmelCase ) if save_final and save_path is None: A__ = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) ) if not os.path.exists(__lowerCAmelCase ): os.makedirs(__lowerCAmelCase ) else: A__ = save_path + """_""" + get_timestamp() os.makedirs(__lowerCAmelCase ) A__ = save_path A__ = self.vqgan.decode(self.latent )[0] if show_intermediate: print("""Original Image""" ) show_pil(custom_to_pil(__lowerCAmelCase ) ) A__ = loop_post_process(__lowerCAmelCase ) for iter, transformed_img in enumerate(self._optimize_CLIP(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ): if show_intermediate: show_pil(__lowerCAmelCase ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}.png' ) ) if self.log: wandb.log({"""Image""": wandb.Image(__lowerCAmelCase )} ) if show_final: show_pil(__lowerCAmelCase ) if save_final: transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}_final.png' ) )
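# Hedged sketch of the "prompt:weight" convention parsed above (standalone;
# assumed names): "a face:1.5" -> ("a face", 1.5), bare prompts default to
# weight 1.0, and "|" separates multiple prompts.
def demo_parse_prompts(prompts: str):
    parsed = []
    for p in (s.strip() for s in prompts.split("|")):
        if ":" in p:
            text, weight = p.split(":")
            parsed.append((text, float(weight)))
        else:
            parsed.append((p, 1.0))
    return parsed


assert demo_parse_prompts("a face:1.5|smiling") == [("a face", 1.5), ("smiling", 1.0)]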
def __lowerCamelCase ( __a :str ) -> list: """simple docstring""" A__ = [0] * len(__a ) for i in range(1 , len(__a ) ): # use last results for better performance - dynamic programming A__ = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: A__ = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 A__ = j return prefix_result def __lowerCamelCase ( __a :str ) -> int: """simple docstring""" return max(prefix_function(__a ) ) if __name__ == "__main__": import doctest doctest.testmod()
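# Worked check (standalone; assumed name): the KMP prefix function of
# "aabaaab" is [0, 1, 0, 1, 2, 2, 3] — e.g. the prefix "aab" is also a suffix
# of the whole string, hence the final value 3 returned by the longest-prefix
# helper above.
def demo_prefix_function(s: str) -> list:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi


assert demo_prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]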
from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class A : '''simple docstring''' __lowerCamelCase : int __lowerCamelCase : int class A : '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : int ) -> Dict: """simple docstring""" A__ = [[] for _ in range(__lowerCAmelCase )] A__ = size def __getitem__( self : Dict , __lowerCAmelCase : int ) -> Iterator[Edge]: """simple docstring""" return iter(self._graph[vertex] ) @property def a_ ( self : Tuple ) -> str: """simple docstring""" return self._size def a_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> Tuple: """simple docstring""" if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(__lowerCAmelCase , __lowerCAmelCase ) ) def a_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int | None: """simple docstring""" A__ = deque([start_vertex] ) A__ = [None] * self.size A__ = 0 while queue: A__ = queue.popleft() A__ = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: A__ = current_distance + edge.weight A__ = distances[edge.destination_vertex] if ( isinstance(__lowerCAmelCase , __lowerCAmelCase ) and new_distance >= dest_vertex_distance ): continue A__ = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
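# Hedged sketch of the 0-1 BFS idea implemented above (standalone; assumed
# names): with weights restricted to {0, 1}, a deque replaces Dijkstra's
# priority queue — weight-0 edges go to the front, weight-1 edges to the back.
from collections import deque


def demo_zero_one_bfs(adj, start):
    # adj maps vertex -> list of (neighbour, weight in {0, 1}) pairs
    dist = {start: 0}
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if nd < dist.get(v, float("inf")):
                dist[v] = nd
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist


assert demo_zero_one_bfs({0: [(1, 0), (2, 1)], 1: [(2, 0)], 2: []}, 0) == {0: 0, 1: 0, 2: 0}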
def __lowerCamelCase ( __a :int = 1_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    A__ = limit + 1
    A__ = [0] * limit
    for first_term in range(1 , __a ):
        for n in range(__a , __a , __a ):
            A__ = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    A__ = sum(1 for x in frequency[1:limit] if x == 1_0 )
    return count


if __name__ == "__main__":
    print(F'''{solution() = }''')
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A : str = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : List[str] = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A : '''simple docstring''' def __init__( self : List[Any] ) -> str: """simple docstring""" A__ = [ [], [], [], ] def a_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: """simple docstring""" try: if len(self.queues[priority] ) >= 1_00: raise OverFlowError("""Maximum queue size is 100""" ) self.queues[priority].append(__lowerCAmelCase ) except IndexError: raise ValueError("""Valid priorities are 0, 1, and 2""" ) def a_ ( self : Optional[Any] ) -> int: """simple docstring""" for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError("""All queues are empty""" ) def __str__( self : Tuple ) -> str: """simple docstring""" return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) ) class A : '''simple docstring''' def __init__( self : int ) -> str: """simple docstring""" A__ = [] def a_ ( self : int , __lowerCAmelCase : int ) -> None: """simple docstring""" if len(self.queue ) == 1_00: raise OverFlowError("""Maximum queue size is 100""" ) self.queue.append(__lowerCAmelCase ) def a_ ( self : List[str] ) -> int: """simple docstring""" if not self.queue: raise UnderFlowError("""The queue is empty""" ) else: A__ = min(self.queue ) self.queue.remove(__lowerCAmelCase ) return data def __str__( self : List[Any] ) -> str: """simple docstring""" return str(self.queue ) def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" A__ = FixedPriorityQueue() fpq.enqueue(0 , 1_0 ) fpq.enqueue(1 , 7_0 ) fpq.enqueue(0 , 1_0_0 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 6_4 ) fpq.enqueue(0 , 1_2_8 ) print(__a ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(__a ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def __lowerCamelCase ( ) -> int: """simple docstring""" A__ = ElementPriorityQueue() epq.enqueue(1_0 ) epq.enqueue(7_0 ) epq.enqueue(1_0_0 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(6_4 ) epq.enqueue(1_2_8 ) print(__a ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(__a ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
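# Illustrative sketch (not from the original file): the element queue above
# expressed with the standard library. heapq gives O(log n) enqueue/dequeue
# instead of the O(n) min()/remove() scan; all names here are hypothetical.
import heapq

class HeapElementPriorityQueue:
    def __init__(self) -> None:
        self._heap: list = []

    def enqueue(self, data: int) -> None:
        heapq.heappush(self._heap, data)

    def dequeue(self) -> int:
        if not self._heap:
            raise IndexError("The queue is empty")
        return heapq.heappop(self._heap)

pq = HeapElementPriorityQueue()
for value in (70, 4, 128):
    pq.enqueue(value)
assert [pq.dequeue() for _ in range(3)] == [4, 70, 128]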
from math import ceil def __lowerCamelCase ( __a :int = 1_0_0_1 ) -> int: """simple docstring""" A__ = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): A__ = 2 * i + 1 A__ = 2 * i A__ = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: A : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number''')
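# Illustrative sketch (not from the original file): a cross-check of the closed
# form above. On ring k (k = 3, 5, 7, ...) of the number spiral the four corners
# are k**2, k**2 - (k - 1), k**2 - 2*(k - 1) and k**2 - 3*(k - 1), which sum to
# 4*k**2 - 6*(k - 1) -- the 4 * odd**2 - 6 * even increment used in the loop.
def spiral_diagonal_sum(n: int) -> int:
    total = 1  # the centre cell
    for k in range(3, n + 1, 2):
        total += 4 * k * k - 6 * (k - 1)  # sum of the four corners of ring k
    return total

assert spiral_diagonal_sum(5) == 101  # the worked 5x5 example from Project Euler 28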
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = tempfile.mkdtemp() # fmt: off A__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on A__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) A__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] A__ = {"""unk_token""": """<unk>"""} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__lowerCAmelCase ) ) A__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } A__ = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Tuple , **__lowerCAmelCase : Dict ) -> str: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : Union[str, Any] , **__lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : List[str] , **__lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : str ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a_ ( self : str ) -> Any: """simple docstring""" A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a_ ( self : Optional[int] ) -> Tuple: """simple docstring""" A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = self.get_image_processor() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) 
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) A__ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def a_ ( self : List[Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = self.prepare_image_inputs() A__ = image_processor(__lowerCAmelCase , return_tensors="""np""" ) A__ = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a_ ( self : Optional[Any] ) -> Any: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = processor(text=__lowerCAmelCase ) A__ = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def a_ ( self : Tuple ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.batch_decode(__lowerCAmelCase ) A__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Optional[int] ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = 
processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
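# Illustrative sketch (not from the original file): the round trip the tests above
# exercise, outside of unittest. Requires network access to fetch the standard
# public checkpoint; everything apart from the CLIPProcessor API is hypothetical.
from PIL import Image
import numpy as np
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=[image], return_tensors="pt")
# Joint text+image calls produce input_ids, attention_mask and pixel_values --
# exactly the key set asserted in the tests above.
print(sorted(inputs.keys()))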
from random import randint from tempfile import TemporaryFile import numpy as np def __lowerCamelCase ( __a :Optional[Any] , __a :str , __a :Dict ) -> Union[str, Any]: """simple docstring""" A__ = 0 if start < end: A__ = randint(__a , __a ) A__ = a[end] A__ = a[pivot] A__ = temp A__ , A__ = _in_place_partition(__a , __a , __a ) count += _in_place_quick_sort(__a , __a , p - 1 ) count += _in_place_quick_sort(__a , p + 1 , __a ) return count def __lowerCamelCase ( __a :Optional[Any] , __a :Optional[Any] , __a :int ) -> List[str]: """simple docstring""" A__ = 0 A__ = randint(__a , __a ) A__ = a[end] A__ = a[pivot] A__ = temp A__ = start - 1 for index in range(__a , __a ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value A__ = new_pivot_index + 1 A__ = a[new_pivot_index] A__ = a[index] A__ = temp A__ = a[new_pivot_index + 1] A__ = a[end] A__ = temp return new_pivot_index + 1, count A : List[str] = TemporaryFile() A : Optional[Any] = 1_0_0 # 1000 elements are to be sorted A, A : Tuple = 0, 1 # mean and standard deviation A : Tuple = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array A : Dict = np.load(outfile) A : Optional[Any] = len(M) - 1 A : str = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
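# Illustrative sketch (not from the original file): the randomized in-place
# quicksort above on a plain Python list, with pivot selection and Lomuto
# partition merged into one function. Names are hypothetical.
import random

def quick_sort_with_count(a: list, start: int, end: int) -> int:
    count = 0
    if start < end:
        pivot = random.randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]  # move the random pivot to the end
        boundary = start - 1
        for index in range(start, end):
            count += 1  # one comparison against the pivot per element
            if a[index] < a[end]:
                boundary += 1
                a[boundary], a[index] = a[index], a[boundary]
        a[boundary + 1], a[end] = a[end], a[boundary + 1]
        p = boundary + 1
        count += quick_sort_with_count(a, start, p - 1)
        count += quick_sort_with_count(a, p + 1, end)
    return count

data = [5, 2, 9, 1, 7]
comparisons = quick_sort_with_count(data, 0, len(data) - 1)
assert data == [1, 2, 5, 7, 9] and comparisons >= len(data) - 1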
from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time A : Dict = Lock() def __lowerCamelCase ( __a :Dict , __a :List[str] , __a :Optional[int] , __a :Optional[int] , __a :Optional[Any] , __a :Optional[int] , __a :int ) -> Dict: """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 1_0 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(__a ) process_lock.release() # receive your right neighbor's value process_lock.acquire() A__ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left A__ = min(__a , __a ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(__a ) process_lock.release() # receive your left neighbor's value process_lock.acquire() A__ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right A__ = max(__a , __a ) # after all swaps are performed, send the values back to main result_pipe[1].send(__a ) def __lowerCamelCase ( __a :List[str] ) -> int: """simple docstring""" A__ = [] A__ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop A__ = Pipe() A__ = Pipe() process_array_.append( Process( target=__a , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) A__ = temp_rs A__ = temp_rr for i in range(1 , len(__a ) - 1 ): A__ = Pipe() A__ = Pipe() process_array_.append( Process( target=__a , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) A__ = temp_rs A__ = temp_rr process_array_.append( Process( target=__a , args=( len(__a ) - 1, arr[len(__a ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(__a ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(__a ) ): A__ = result_pipe[p][0].recv() process_array_[p].join() return arr def __lowerCamelCase ( ) -> str: """simple docstring""" A__ = list(range(1_0 , 0 , -1 ) ) print("""Initial List""" ) print(*__a ) A__ = odd_even_transposition(__a ) print("""Sorted List\n""" ) print(*__a ) if __name__ == "__main__": main()
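# Illustrative sketch (not from the original file): the same odd-even transposition
# ("brick") sort without processes or pipes, to make the swap pattern explicit.
# After n phases over n elements the list is guaranteed sorted, which is why the
# parallel version above can run a fixed number of rounds.
def odd_even_transposition_sequential(arr: list) -> list:
    arr = list(arr)
    for phase in range(len(arr)):
        start = phase % 2  # even phases compare (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))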
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask A : Tuple = logging.getLogger(__name__) class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : int , __lowerCAmelCase : List[Any]=-1 ) -> Union[str, Any]: """simple docstring""" A__ = label_idx def a_ ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[Split, str] ) -> List[InputExample]: """simple docstring""" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): A__ = mode.value A__ = os.path.join(__lowerCAmelCase , f'{mode}.txt' ) A__ = 1 A__ = [] with open(__lowerCAmelCase , encoding="""utf-8""" ) as f: A__ = [] A__ = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) ) guid_index += 1 A__ = [] A__ = [] else: A__ = line.split(""" """ ) words.append(splits[0] ) if len(__lowerCAmelCase ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) ) return examples def a_ ( self : Tuple , __lowerCAmelCase : TextIO , __lowerCAmelCase : TextIO , __lowerCAmelCase : List ) -> List[Any]: """simple docstring""" A__ = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(__lowerCAmelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: A__ = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(__lowerCAmelCase ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def a_ ( self : Any , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" if path: with open(__lowerCAmelCase , """r""" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : Optional[int] ) -> List[Any]: """simple docstring""" super().__init__(label_idx=-2 ) def a_ ( self : Optional[int] , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" if path: with open(__lowerCAmelCase , """r""" ) as f: A__ = f.read().splitlines() if "O" not in labels: A__ = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' def a_ ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[Split, str] ) -> List[InputExample]: """simple docstring""" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): A__ = mode.value A__ = os.path.join(__lowerCAmelCase , f'{mode}.txt' ) A__ = 1 A__ = [] with open(__lowerCAmelCase , encoding="""utf-8""" ) as f: for sentence in parse_incr(__lowerCAmelCase ): A__ = [] A__ = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=__lowerCAmelCase , 
labels=__lowerCAmelCase ) ) guid_index += 1 return examples def a_ ( self : int , __lowerCAmelCase : TextIO , __lowerCAmelCase : TextIO , __lowerCAmelCase : List ) -> Union[str, Any]: """simple docstring""" A__ = 0 for sentence in parse_incr(__lowerCAmelCase ): A__ = preds_list[example_id] A__ = """""" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(__lowerCAmelCase ) example_id += 1 def a_ ( self : Any , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" if path: with open(__lowerCAmelCase , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
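# Illustrative sketch (not from the original file): the whitespace-separated CoNLL
# layout the NER reader above expects -- one token per line, the label in the
# column selected by label_idx, blank lines between sentences. Names and the tiny
# data chunk are hypothetical.
conll_chunk = """EU B-ORG
rejects O
German B-MISC

Peter B-PER
Blackburn I-PER
"""

sentences, words, labels = [], [], []
for line in conll_chunk.splitlines():
    if not line.strip():                 # a blank line marks a sentence boundary
        if words:
            sentences.append((words, labels))
            words, labels = [], []
        continue
    token, label = line.split(" ")
    words.append(token)
    labels.append(label)                 # label_idx == -1 selects the last column
if words:
    sentences.append((words, labels))

assert sentences[0] == (["EU", "rejects", "German"], ["B-ORG", "O", "B-MISC"])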
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def __lowerCamelCase ( __a :Dict ) -> Any: """simple docstring""" A__ = [ """decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(__a , __a ) def __lowerCamelCase ( __a :str ) -> Union[str, Any]: """simple docstring""" A__ , A__ = emb.weight.shape A__ = nn.Linear(__a , __a , bias=__a ) A__ = emb.weight.data return lin_layer def __lowerCamelCase ( __a :str ) -> List[str]: """simple docstring""" A__ = torch.load(__a , map_location="""cpu""" ) A__ = Namespace(**checkpoint["""cfg"""]["""model"""] ) A__ = checkpoint["""model"""] remove_ignore_keys_(__a ) A__ = state_dict["""decoder.embed_tokens.weight"""].shape[0] A__ = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()} A__ = XGLMConfig( vocab_size=__a , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) A__ = XGLMForCausalLM(__a ) A__ = model.load_state_dict(__a , strict=__a ) print(__a ) A__ = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": A : int = argparse.ArgumentParser() # Required parameters parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') A : str = parser.parse_args() A : str = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
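# Illustrative sketch (not from the original file): what make_linear_from_emb does,
# on a tiny embedding. The output projection shares the embedding matrix, so the
# logit for token i is the dot product of the hidden state with embedding row i
# (weight tying, enabled above via tie_word_embeddings). Sizes are hypothetical.
import torch
from torch import nn

embedding = nn.Embedding(num_embeddings=5, embedding_dim=3)
lm_head = nn.Linear(3, 5, bias=False)
lm_head.weight.data = embedding.weight.data  # share the same storage, no copy

hidden_state = torch.randn(3)
logits = lm_head(hidden_state)
assert torch.allclose(logits, embedding.weight @ hidden_state)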
import json import os import unittest from transformers.models.roc_bert.tokenization_roc_bert import ( VOCAB_FILES_NAMES, RoCBertBasicTokenizer, RoCBertTokenizer, RoCBertWordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class A (SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Tuple = RoCBertTokenizer __lowerCamelCase : str = None __lowerCamelCase : Any = False __lowerCamelCase : Tuple = True __lowerCamelCase : List[str] = filter_non_english def a_ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" super().setUp() A__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""] A__ = {} A__ = {} for i, value in enumerate(__lowerCAmelCase ): A__ = i A__ = i A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer: json.dump(__lowerCAmelCase , __lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer: json.dump(__lowerCAmelCase , __lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) def a_ ( self : List[str] ) -> Tuple: """simple docstring""" A__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A__ = tokenizer.tokenize("""你好[SEP]你是谁""" ) self.assertListEqual(__lowerCAmelCase , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__lowerCAmelCase ) , [5, 6, 2, 5, 7, 8] ) self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__lowerCAmelCase ) , [5, 6, 2, 5, 7, 8] ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" A__ = RoCBertBasicTokenizer() self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] ) def a_ ( self : Any ) -> Any: """simple docstring""" A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def a_ ( self : Tuple ) -> int: """simple docstring""" A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] ) def a_ ( self : Optional[Any] ) -> str: """simple docstring""" A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? 
""" ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def a_ ( self : List[str] ) -> List[str]: """simple docstring""" A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] ) def a_ ( self : Dict ) -> Dict: """simple docstring""" A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def a_ ( self : int ) -> List[str]: """simple docstring""" A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase ) self.assertListEqual( tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] ) def a_ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" A__ = RoCBertBasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=["""[UNK]"""] ) self.assertListEqual( tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] ) def a_ ( self : int ) -> str: """simple docstring""" A__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] A__ = {} for i, token in enumerate(__lowerCAmelCase ): A__ = i A__ = RoCBertWordpieceTokenizer(vocab=__lowerCAmelCase , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] ) def a_ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" self.assertTrue(_is_whitespace(""" """ ) ) self.assertTrue(_is_whitespace("""\t""" ) ) self.assertTrue(_is_whitespace("""\r""" ) ) self.assertTrue(_is_whitespace("""\n""" ) ) self.assertTrue(_is_whitespace("""\u00A0""" ) ) self.assertFalse(_is_whitespace("""A""" ) ) self.assertFalse(_is_whitespace("""-""" ) ) def a_ ( self : int ) -> Union[str, Any]: """simple docstring""" self.assertTrue(_is_control("""\u0005""" ) ) self.assertFalse(_is_control("""A""" ) ) self.assertFalse(_is_control(""" """ ) ) self.assertFalse(_is_control("""\t""" ) ) self.assertFalse(_is_control("""\r""" ) ) def a_ ( self : str ) -> List[Any]: """simple docstring""" self.assertTrue(_is_punctuation("""-""" ) ) self.assertTrue(_is_punctuation("""$""" ) ) self.assertTrue(_is_punctuation("""`""" ) ) self.assertTrue(_is_punctuation(""".""" ) ) self.assertFalse(_is_punctuation("""A""" ) ) self.assertFalse(_is_punctuation(""" """ ) ) def a_ ( self : List[str] ) -> Optional[int]: """simple docstring""" A__ = self.get_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ["""Test""", 
"""\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) if self.test_rust_tokenizer: A__ = self.get_rust_tokenizer() self.assertListEqual( [rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] ) def a_ ( self : Any ) -> Dict: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): A__ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) A__ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.' A__ = tokenizer_r.encode_plus( __lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , ) A__ = tokenizer_r.do_lower_case if hasattr(__lowerCAmelCase , """do_lower_case""" ) else False A__ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] ) def a_ ( self : Dict ) -> Optional[int]: """simple docstring""" A__ = ["""的""", """人""", """有"""] A__ = """""".join(__lowerCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): A__ = True A__ = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) A__ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) A__ = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) A__ = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) A__ = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) A__ = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) A__ = False A__ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) A__ = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) A__ = tokenizer_r.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) A__ = tokenizer_p.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) A__ = tokenizer_r.convert_ids_to_tokens(__lowerCAmelCase ) A__ = tokenizer_p.convert_ids_to_tokens(__lowerCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". 
A__ = [ f'##{token}' if idx != 0 else token for idx, token in enumerate(__lowerCAmelCase ) ] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) @slow def a_ ( self : Union[str, Any] ) -> str: """simple docstring""" A__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file ) A__ = tokenizer.encode("""你好""" , add_special_tokens=__lowerCAmelCase ) A__ = tokenizer.encode("""你是谁""" , add_special_tokens=__lowerCAmelCase ) A__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase ) A__ = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase ) assert encoded_sentence == [1] + text + [2] assert encoded_pair == [1] + text + [2] + text_a + [2] def a_ ( self : Dict ) -> int: """simple docstring""" A__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): A__ = """你好,你是谁""" A__ = tokenizer.tokenize(__lowerCAmelCase ) A__ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) A__ = tokenizer.convert_tokens_to_shape_ids(__lowerCAmelCase ) A__ = tokenizer.convert_tokens_to_pronunciation_ids(__lowerCAmelCase ) A__ = tokenizer.prepare_for_model( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) A__ = tokenizer.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
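# Illustrative sketch (not from the original file): the greedy longest-match-first
# wordpiece algorithm exercised by the tests above, written out standalone with
# hypothetical names.
def wordpiece_tokenize(word: str, vocab: set, unk: str = "[UNK]") -> list:
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        current = None
        while start < end:            # shrink the candidate until a vocab entry matches
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk]              # any unmatched span maps the whole word to [UNK]
        tokens.append(current)
        start = end
    return tokens

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece_tokenize("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece_tokenize("unwantedX", vocab) == ["[UNK]"]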
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class A (unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : Tuple=4 , ) -> Dict: """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_attention_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_choices def a_ ( self : Any ) -> str: """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_attention_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = config_and_inputs A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class A (SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : str = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def a_ ( self : str ) -> Optional[int]: """simple docstring""" A__ = FlaxAlbertModelTester(self ) @slow def a_ ( self : int ) -> Tuple: """simple docstring""" for 
model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained("""albert-base-v2""" ) A__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCAmelCase ) @require_flax class A (unittest.TestCase ): '''simple docstring''' @slow def a_ ( self : Dict ) -> List[Any]: """simple docstring""" A__ = FlaxAlbertModel.from_pretrained("""albert-base-v2""" ) A__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) A__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] A__ = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCAmelCase ) A__ = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
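# Illustrative sketch (not from the original file): the integration check above as
# a plain script. Requires flax and network access for the public albert-base-v2
# checkpoint; the input ids are the ones used in the slow test.
import numpy as np
from transformers import FlaxAlbertModel

model = FlaxAlbertModel.from_pretrained("albert-base-v2")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.ones_like(input_ids)
last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden_state.shape)  # (1, 11, 768) for this 11-token batch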
import copy import re class A : '''simple docstring''' __lowerCamelCase : Dict = '''hp''' __lowerCamelCase : List[str] = {} __lowerCamelCase : Any = None @classmethod def a_ ( cls : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any ) -> List[str]: """simple docstring""" A__ = prefix A__ = defaults cls.build_naming_info() @staticmethod def a_ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ) -> str: """simple docstring""" if len(__lowerCAmelCase ) == 0: return "" A__ = None if any(char.isdigit() for char in word ): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__lowerCAmelCase ) + 1 ): A__ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A__ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__lowerCAmelCase : List[str] ): A__ = """""" while integer != 0: A__ = chr(ord("""A""" ) + integer % 10 ) + s integer //= 10 return s A__ = 0 while True: A__ = word + """#""" + int_to_alphabetic(__lowerCAmelCase ) if sword in info["reverse_short_word"]: continue else: A__ = sword break A__ = short_word A__ = word return short_word @staticmethod def a_ ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> str: """simple docstring""" A__ = param_name.split("""_""" ) A__ = [TrialShortNamer.shortname_for_word(__lowerCAmelCase , __lowerCAmelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A__ = ["""""", """_"""] for separator in separators: A__ = separator.join(__lowerCAmelCase ) if shortname not in info["reverse_short_param"]: A__ = shortname A__ = param_name return shortname return param_name @staticmethod def a_ ( __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ) -> Optional[Any]: """simple docstring""" A__ = TrialShortNamer.shortname_for_key(__lowerCAmelCase , __lowerCAmelCase ) A__ = short_name A__ = param_name @classmethod def a_ ( cls : Dict ) -> Tuple: """simple docstring""" if cls.NAMING_INFO is not None: return A__ = { """short_word""": {}, """reverse_short_word""": {}, """short_param""": {}, """reverse_short_param""": {}, } A__ = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(__lowerCAmelCase , __lowerCAmelCase ) A__ = info @classmethod def a_ ( cls : str , __lowerCAmelCase : Optional[int] ) -> Any: """simple docstring""" cls.build_naming_info() assert cls.PREFIX is not None A__ = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A__ = cls.NAMING_INFO["""short_param"""][k] if isinstance(__lowerCAmelCase , __lowerCAmelCase ): A__ = 1 if v else 0 A__ = """""" if isinstance(__lowerCAmelCase , (int, float) ) else """-""" A__ = f'{key}{sep}{v}' name.append(__lowerCAmelCase ) return "_".join(__lowerCAmelCase ) @classmethod def a_ ( cls : List[str] , __lowerCAmelCase : Optional[Any] ) -> str: """simple docstring""" A__ = repr[len(cls.PREFIX ) + 1 :] if repr == "": A__ = [] else: A__ = repr.split("""_""" ) A__ = {} for value in values: if "-" in value: A__ , A__ = value.split("""-""" ) else: A__ = re.sub("""[0-9.]""" , """""" , __lowerCAmelCase ) A__ = float(re.sub("""[^0-9.]""" , """""" , __lowerCAmelCase ) ) A__ = 
cls.NAMING_INFO["""reverse_short_param"""][p_k] A__ = p_v for k in cls.DEFAULTS: if k not in parameters: A__ = cls.DEFAULTS[k] return parameters
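# Illustrative sketch (not from the original file): how this short namer is used
# for hyperparameter-search run names. TrialShortNamer, shortname and parse_repr
# are the de-obfuscated names assumed for the class above (its public home in
# transformers is transformers.utils.hp_naming); the subclass and all concrete
# values are hypothetical.
from transformers.utils.hp_naming import TrialShortNamer

class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "warmup_steps": 0, "fp": False}

name = RunNamer.shortname({"learning_rate": 0.01, "warmup_steps": 100, "fp": False})
print(name)  # e.g. "run_lr0.01_ws100" -- params equal to their default (fp) are omitted
params = RunNamer.parse_repr(name)   # omitted keys are filled back in from DEFAULTS
assert params["warmup_steps"] == 100 and params["fp"] is False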
from sklearn.metrics import fa_score import datasets A : Any = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' A : List[Any] = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' A : List[Any] = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A (datasets.Metric ): '''simple docstring''' def a_ ( self : Optional[int] ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def a_ ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Any="binary" , __lowerCAmelCase : Optional[int]=None ) -> List[Any]: """simple docstring""" A__ = fa_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase ) return {"f1": float(__lowerCAmelCase ) if score.size == 1 else score}
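# Illustrative sketch (not from the original file): the metric above is a thin
# wrapper over sklearn.metrics.f1_score, so its docstring examples can be checked
# against sklearn directly.
from sklearn.metrics import f1_score

references = [0, 1, 0, 1, 0]
predictions = [0, 0, 1, 1, 0]
assert f1_score(references, predictions) == 0.5                        # binary, pos_label=1
assert round(f1_score(references, predictions, pos_label=0), 2) == 0.67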
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType A : str = logging.get_logger(__name__) A : Union[str, Any] = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Optional[Any] = '''layoutlmv3''' def __init__( self : Tuple , __lowerCAmelCase : Optional[int]=5_02_65 , __lowerCAmelCase : Tuple=7_68 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : List[Any]=30_72 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=5_12 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Tuple=10_24 , __lowerCAmelCase : List[str]=1_28 , __lowerCAmelCase : Optional[int]=1_28 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Any=1_28 , __lowerCAmelCase : str=64 , __lowerCAmelCase : Optional[int]=2_56 , __lowerCAmelCase : int=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=2_24 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : Optional[Any] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = max_ad_position_embeddings A__ = coordinate_size A__ = shape_size A__ = has_relative_attention_bias A__ = rel_pos_bins A__ = max_rel_pos A__ = has_spatial_attention_bias A__ = rel_ad_pos_bins A__ = max_rel_ad_pos A__ = text_embed A__ = visual_embed A__ = input_size A__ = num_channels A__ = patch_size A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = version.parse('''1.12''' ) @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def a_ ( self : Optional[int] ) -> 
float: """simple docstring""" return 1e-5 @property def a_ ( self : Tuple ) -> int: """simple docstring""" return 12 def a_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A__ = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence A__ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes A__ = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
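# Illustrative sketch (not from the original file): instantiating the config above
# with a few non-default values. LayoutLMv3Config is the de-obfuscated name
# assumed for the class; every keyword shown maps to an __init__ argument above.
from transformers import LayoutLMv3Config

config = LayoutLMv3Config(hidden_size=384, num_hidden_layers=6, input_size=112)
print(config.hidden_size, config.num_hidden_layers, config.input_size)
# PretrainedConfig gives JSON round-tripping for free:
assert LayoutLMv3Config.from_dict(config.to_dict()).input_size == 112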
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : int = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Any = '''xlm-roberta''' def __init__( self : Optional[Any] , __lowerCAmelCase : List[Any]=3_05_22 , __lowerCAmelCase : int=7_68 , __lowerCAmelCase : Tuple=12 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Union[str, Any]=30_72 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-12 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Tuple="absolute" , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=None , **__lowerCAmelCase : str , ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A__ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: A__ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
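# Illustrative sketch (not from the original file): the standard config-to-model
# path for the class above. XLMRobertaConfig / XLMRobertaModel are the public
# names assumed for the classes; a config builds a randomly initialised model
# without downloading any weights, and the tiny sizes here are hypothetical.
from transformers import XLMRobertaConfig, XLMRobertaModel

config = XLMRobertaConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2,
                          num_attention_heads=4, intermediate_size=128)
model = XLMRobertaModel(config)
print(sum(p.numel() for p in model.parameters()))  # a tiny model for smoke tests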
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Optional[int] = (EulerDiscreteScheduler,) __lowerCamelCase : Tuple = 10 def a_ ( self : Optional[int] , **__lowerCAmelCase : Tuple ) -> Optional[Any]: """simple docstring""" A__ = { """num_train_timesteps""": 11_00, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", } config.update(**__lowerCAmelCase ) return config def a_ ( self : Any ) -> Optional[int]: """simple docstring""" for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def a_ ( self : str ) -> List[str]: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase ) def a_ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__lowerCAmelCase ) def a_ ( self : List[str] ) -> List[str]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def a_ ( self : List[str] ) -> List[Any]: """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) A__ = torch.manual_seed(0 ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma A__ = sample.to(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): A__ = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) A__ = model(__lowerCAmelCase , __lowerCAmelCase ) A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ) A__ = output.prev_sample A__ = torch.sum(torch.abs(__lowerCAmelCase ) ) A__ = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def a_ ( self : int ) -> Tuple: """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(prediction_type="""v_prediction""" ) A__ = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) A__ = torch.manual_seed(0 ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma A__ = sample.to(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): A__ = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) A__ = model(__lowerCAmelCase , __lowerCAmelCase ) A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ) A__ = output.prev_sample A__ = torch.sum(torch.abs(__lowerCAmelCase ) ) A__ = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2 assert abs(result_mean.item() - 2.2_676e-06 ) < 1e-3 def a_ ( self : Tuple ) -> List[str]: """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__lowerCAmelCase ) A__ = torch.manual_seed(0 ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() A__ = sample.to(__lowerCAmelCase ) for t in scheduler.timesteps: A__ = 
scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) A__ = model(__lowerCAmelCase , __lowerCAmelCase ) A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ) A__ = output.prev_sample A__ = torch.sum(torch.abs(__lowerCAmelCase ) ) A__ = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def a_ ( self : Dict ) -> List[Any]: """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**__lowerCAmelCase , use_karras_sigmas=__lowerCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__lowerCAmelCase ) A__ = torch.manual_seed(0 ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() A__ = sample.to(__lowerCAmelCase ) for t in scheduler.timesteps: A__ = scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase ) A__ = model(__lowerCAmelCase , __lowerCAmelCase ) A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ) A__ = output.prev_sample A__ = torch.sum(torch.abs(__lowerCAmelCase ) ) A__ = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
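The tests above all exercise the same denoising pattern: scale the sample, predict noise, step the scheduler. A stripped-down sketch of that loop follows; the tensor shape and the multiply-by-0.001 "model" are toy stand-ins for the dummy UNet used in the tests, not part of the `diffusers` API.

import torch
from diffusers import EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler(
    num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
scheduler.set_timesteps(10)

generator = torch.manual_seed(0)
# Start from noise scaled to the scheduler's initial sigma.
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma

for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = model_input * 0.001  # toy stand-in for a UNet's epsilon prediction
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample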
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean A : str = 0 A : Any = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] A : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right A : Union[str, Any] = tuple[int, int] class A : '''simple docstring''' def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Node | None , ) -> None: """simple docstring""" A__ = pos_x A__ = pos_y A__ = (pos_y, pos_x) A__ = goal_x A__ = goal_y A__ = g_cost A__ = parent A__ = self.calculate_heuristic() A__ = self.g_cost + self.h_cost def a_ ( self : Dict ) -> float: """simple docstring""" A__ = self.pos_x - self.goal_x A__ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(__lowerCAmelCase ) + abs(__lowerCAmelCase ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : int , __lowerCAmelCase : Node ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class A : '''simple docstring''' def __init__( self : Union[str, Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> Tuple: """simple docstring""" A__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCAmelCase ) A__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __lowerCAmelCase ) A__ = [self.start] A__ = [] A__ = False def a_ ( self : List[str] ) -> list[TPosition]: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A__ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(__lowerCAmelCase ) self.closed_nodes.append(__lowerCAmelCase ) A__ = self.get_successors(__lowerCAmelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__lowerCAmelCase ) else: # retrieve the best current path A__ = self.open_nodes.pop(self.open_nodes.index(__lowerCAmelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__lowerCAmelCase ) else: self.open_nodes.append(__lowerCAmelCase ) return [self.start.pos] def a_ ( self : Optional[Any] , __lowerCAmelCase : Node ) -> list[Node]: """simple docstring""" A__ = [] for action in delta: A__ = parent.pos_x + action[1] A__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCAmelCase , ) ) return successors def a_ ( self : List[Any] , __lowerCAmelCase : Node | None ) -> list[TPosition]: """simple docstring""" A__ = node A__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A__ = current_node.parent path.reverse() return path class A : '''simple docstring''' def __init__( self : Optional[Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> None: """simple docstring""" A__ = AStar(__lowerCAmelCase , __lowerCAmelCase ) A__ = AStar(__lowerCAmelCase , __lowerCAmelCase ) A__ = False def a_ ( self : int ) -> list[TPosition]: """simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() 
self.bwd_astar.open_nodes.sort() A__ = self.fwd_astar.open_nodes.pop(0 ) A__ = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( __lowerCAmelCase , __lowerCAmelCase ) self.fwd_astar.closed_nodes.append(__lowerCAmelCase ) self.bwd_astar.closed_nodes.append(__lowerCAmelCase ) A__ = current_bwd_node A__ = current_fwd_node A__ = { self.fwd_astar: self.fwd_astar.get_successors(__lowerCAmelCase ), self.bwd_astar: self.bwd_astar.get_successors(__lowerCAmelCase ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(__lowerCAmelCase ) else: # retrieve the best current path A__ = astar.open_nodes.pop( astar.open_nodes.index(__lowerCAmelCase ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(__lowerCAmelCase ) else: astar.open_nodes.append(__lowerCAmelCase ) return [self.fwd_astar.start.pos] def a_ ( self : List[str] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ) -> list[TPosition]: """simple docstring""" A__ = self.fwd_astar.retrace_path(__lowerCAmelCase ) A__ = self.bwd_astar.retrace_path(__lowerCAmelCase ) bwd_path.pop() bwd_path.reverse() A__ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] A : Optional[int] = (0, 0) A : int = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) A : Dict = time.time() A : Optional[Any] = AStar(init, goal) A : Optional[int] = a_star.search() A : Optional[int] = time.time() - start_time print(F'''AStar execution time = {end_time:f} seconds''') A : Dict = time.time() A : Tuple = BidirectionalAStar(init, goal) A : List[Any] = time.time() - bd_start_time print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A : '''simple docstring''' def __init__( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : Optional[int]=30 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : List[Any]=3 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : int=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Optional[int]=37 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Dict=10 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[Any]=0.6 , __lowerCAmelCase : Optional[int]=None , ) -> str: """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = mask_ratio A__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) A__ = (image_size // patch_size) ** 2 A__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def a_ ( self : int ) -> Union[str, Any]: """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def a_ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def a_ ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ) -> List[Any]: """simple docstring""" A__ = ViTMAEModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str ) -> Any: """simple 
docstring""" A__ = ViTMAEForPreTraining(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = model(__lowerCAmelCase ) A__ = (self.image_size // self.patch_size) ** 2 A__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images A__ = 1 A__ = ViTMAEForPreTraining(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(__lowerCAmelCase ) A__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def a_ ( self : Optional[int] ) -> List[str]: """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : int = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () __lowerCamelCase : Union[str, Any] = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} __lowerCamelCase : int = False __lowerCamelCase : Optional[Any] = False __lowerCamelCase : str = False __lowerCamelCase : str = False def a_ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" A__ = ViTMAEModelTester(self ) A__ = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 ) def a_ ( self : Any ) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def a_ ( self : Optional[Any] ) -> Any: """simple docstring""" pass def a_ ( self : int ) -> int: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) ) def a_ ( self : str ) -> Any: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__lowerCAmelCase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def a_ ( self : Dict ) -> Tuple: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def a_ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase ) def a_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> str: """simple docstring""" np.random.seed(2 ) A__ = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) A__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) A__ = torch.from_numpy(__lowerCAmelCase ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument A__ = pt_noise super().check_pt_tf_models(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> int: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): A__ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) A__ = outputs[0].cpu().numpy() A__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowerCAmelCase ) A__ = model_class.from_pretrained(__lowerCAmelCase ) model.to(__lowerCAmelCase ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): A__ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) # Make sure we don't have nans A__ = after_outputs[0].cpu().numpy() A__ = 0 A__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowerCAmelCase , 1e-5 ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def a_ ( self : List[str] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def a_ ( self : str ) -> Dict: """simple docstring""" pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def a_ ( self : int ) -> List[Any]: """simple docstring""" pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load""" ) def a_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def a_ ( self : int ) -> Union[str, Any]: """simple docstring""" pass @slow def a_ ( self : Dict ) -> List[Any]: """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ViTMAEModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A (unittest.TestCase ): '''simple docstring''' @cached_property def a_ ( self : Optional[Any] ) -> int: """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def a_ ( self : int ) -> Union[str, Any]: """simple docstring""" np.random.seed(2 ) A__ = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(__lowerCAmelCase ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) A__ = ViTMAEConfig() A__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) A__ = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): A__ = model(**__lowerCAmelCase , noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) ) # verify the logits A__ = torch.Size((1, 1_96, 7_68) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) A__ = torch.tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__lowerCAmelCase ) , atol=1e-4 ) )
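The slow integration test above reduces to a short inference recipe. A minimal sketch, assuming network access to the `facebook/vit-mae-base` checkpoint named in the test:

import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMAEForPreTraining

processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)  # random patch masking happens inside the forward pass

print(outputs.logits.shape)  # (1, 196, 768): per-patch pixel reconstructions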
import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A : int = logging.getLogger(__name__) A : Optional[Any] = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) A : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class A : '''simple docstring''' __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Leave None if you want to train a model from''' ''' scratch.''' ) } , ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(SCREAMING_SNAKE_CASE )} , ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class A : '''simple docstring''' __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input training data file (a text file).'''} ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''The input training data files (multiple files in glob format). 
''' '''Very often splitting large files to smaller files can prevent tokenizer going out of memory''' ) } , ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether ot not to use whole word mask.'''} ) __lowerCamelCase : float = field( default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} ) __lowerCamelCase : float = field( default=1 / 6 , metadata={ '''help''': ( '''Ratio of length of a span of masked tokens to surrounding context length for permutation language''' ''' modeling.''' ) } , ) __lowerCamelCase : int = field( default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} ) __lowerCamelCase : int = field( default=-1 , metadata={ '''help''': ( '''Optional input sequence length after tokenization.''' '''The training dataset will be truncated in block of this size for training.''' '''Default to the model max input length for single sentence inputs (take into account special tokens).''' ) } , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __lowerCamelCase ( __a :DataTrainingArguments , __a :PreTrainedTokenizer , __a :bool = False , __a :Optional[str] = None , ) -> int: """simple docstring""" def _dataset(__a :Any , __a :Tuple=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" ) return LineByLineWithRefDataset( tokenizer=__a , file_path=__a , block_size=args.block_size , ref_path=__a , ) return LineByLineTextDataset(tokenizer=__a , file_path=__a , block_size=args.block_size ) else: return TextDataset( tokenizer=__a , file_path=__a , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=__a , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(__a ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def __lowerCamelCase ( ) -> str: """simple docstring""" A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A__ , A__ , A__ = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( """Cannot do evaluation without an evaluation data file. 
Either supply a file to --eval_data_file """ """or remove the --do_eval argument.""" ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , __a ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: A__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: A__ = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.tokenizer_name: A__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another""" """ script, save it,and load it from here, using --tokenizer_name""" ) if model_args.model_name_or_path: A__ = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , ) else: logger.info("""Training new model from scratch""" ) A__ = AutoModelWithLMHead.from_config(__a ) model.resize_token_embeddings(len(__a ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( """BERT and RoBERTa-like models do not have LM heads but masked LM heads. 
They must be run using the""" """--mlm flag (masked language modeling).""" ) if data_args.block_size <= 0: A__ = tokenizer.max_len # Our input block size will be the max possible for the model else: A__ = min(data_args.block_size , tokenizer.max_len ) # Get datasets A__ = ( get_dataset(__a , tokenizer=__a , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) A__ = ( get_dataset(__a , tokenizer=__a , evaluate=__a , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": A__ = DataCollatorForPermutationLanguageModeling( tokenizer=__a , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: A__ = DataCollatorForWholeWordMask( tokenizer=__a , mlm_probability=data_args.mlm_probability ) else: A__ = DataCollatorForLanguageModeling( tokenizer=__a , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A__ = Trainer( model=__a , args=__a , data_collator=__a , train_dataset=__a , eval_dataset=__a , prediction_loss_only=__a , ) # Training if training_args.do_train: A__ = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=__a ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A__ = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) A__ = trainer.evaluate() A__ = math.exp(eval_output["""eval_loss"""] ) A__ = {"""perplexity""": perplexity} A__ = os.path.join(training_args.output_dir , """eval_results_lm.txt""" ) if trainer.is_world_master(): with open(__a , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , __a , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) results.update(__a ) return results def __lowerCamelCase ( __a :Optional[Any] ) -> Dict: """simple docstring""" main() if __name__ == "__main__": main()
from collections import deque class A : '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: """simple docstring""" A__ = process_name # process name A__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A__ = arrival_time A__ = burst_time # remaining burst time A__ = 0 # total time of the process wait in ready queue A__ = 0 # time from arrival time to completion time class A : '''simple docstring''' def __init__( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int , ) -> None: """simple docstring""" A__ = number_of_queues # time slice of queues that round robin algorithm applied A__ = time_slices # unfinished process is in this ready_queue A__ = queue # current time A__ = current_time # finished process is in this sequence queue A__ = deque() def a_ ( self : Dict ) -> list[str]: """simple docstring""" A__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a_ ( self : Tuple , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a_ ( self : Optional[Any] , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a_ ( self : Dict , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a_ ( self : int , __lowerCAmelCase : deque[Process] ) -> list[int]: """simple docstring""" return [q.burst_time for q in queue] def a_ ( self : Any , __lowerCAmelCase : Process ) -> int: """simple docstring""" process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a_ ( self : Union[str, Any] , __lowerCAmelCase : deque[Process] ) -> deque[Process]: """simple docstring""" A__ = deque() # sequence deque of finished process while len(__lowerCAmelCase ) != 0: A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__lowerCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A__ = 0 # set the process's turnaround time because it is finished A__ = self.current_time - cp.arrival_time # set the completion time A__ = self.current_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a_ ( self : Optional[Any] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int ) -> tuple[deque[Process], deque[Process]]: """simple docstring""" A__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(__lowerCAmelCase ) ): A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if 
self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__lowerCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__lowerCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A__ = 0 # set the finish time A__ = self.current_time # update the process' turnaround time because it is finished A__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a_ ( self : List[Any] ) -> deque[Process]: """simple docstring""" for i in range(self.number_of_queues - 1 ): A__ , A__ = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest A : Union[str, Any] = Process('''P1''', 0, 5_3) A : Optional[Any] = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : int = Process('''P4''', 0, 2_4) A : Any = 3 A : List[Any] = [1_7, 2_5] A : Optional[Any] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) A : Optional[Any] = Process('''P1''', 0, 5_3) A : int = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : Tuple = Process('''P4''', 0, 2_4) A : Union[str, Any] = 3 A : Optional[Any] = [1_7, 2_5] A : Tuple = deque([Pa, Pa, Pa, Pa]) A : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0) A : Dict = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( F'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( F'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## A : Optional[int] = 1_6 A : Tuple = 3_2 def __lowerCamelCase ( __a :Accelerator , __a :int = 1_6 ) -> Tuple: """simple docstring""" A__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) A__ = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__a :Any ): # max_length=None => use the model max length (it's actually the default) A__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__a , max_length=__a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A__ = datasets.map( __a , batched=__a , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A__ = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__a :str ): # On TPU it's best to pad everything to the same length or training will be very slow. A__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A__ = 1_6 elif accelerator.mixed_precision != "no": A__ = 8 else: A__ = None return tokenizer.pad( __a , padding="""longest""" , max_length=__a , pad_to_multiple_of=__a , return_tensors="""pt""" , ) # Instantiate dataloaders. A__ = DataLoader( tokenized_datasets["""train"""] , shuffle=__a , collate_fn=__a , batch_size=__a ) A__ = DataLoader( tokenized_datasets["""validation"""] , shuffle=__a , collate_fn=__a , batch_size=__a ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders A : List[str] = mocked_dataloaders # noqa: F811 def __lowerCamelCase ( __a :Any , __a :Optional[Any] ) -> List[str]: """simple docstring""" if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __a ) == "1": A__ = 2 # New Code # A__ = int(args.gradient_accumulation_steps ) # Initialize accelerator A__ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__a ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. 
Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A__ = config["""lr"""] A__ = int(config["""num_epochs"""] ) A__ = int(config["""seed"""] ) A__ = int(config["""batch_size"""] ) A__ = evaluate.load("""glue""" , """mrpc""" ) set_seed(__a ) A__ , A__ = get_dataloaders(__a , __a ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__a ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A__ = model.to(accelerator.device ) # Instantiate optimizer A__ = AdamW(params=model.parameters() , lr=__a ) # Instantiate scheduler A__ = get_linear_schedule_with_warmup( optimizer=__a , num_warmup_steps=1_0_0 , num_training_steps=(len(__a ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A__ , A__ , A__ , A__ , A__ = accelerator.prepare( __a , __a , __a , __a , __a ) # Now we train the model for epoch in range(__a ): model.train() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__a ): A__ = model(**__a ) A__ = output.loss accelerator.backward(__a ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A__ = model(**__a ) A__ = outputs.logits.argmax(dim=-1 ) A__ , A__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__a , references=__a , ) A__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , __a ) def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__a , default=__a , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=__a , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) A__ = parser.parse_args() A__ = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(__a , __a ) if __name__ == "__main__": main()
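The core of the training function above is the `accelerator.accumulate` context manager. A self-contained sketch of just that mechanism; the linear model and random data are placeholders chosen only to make the loop runnable:

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(64, 8), torch.randn(64, 1))
dataloader = DataLoader(dataset, batch_size=4)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    # Inside `accumulate`, gradient synchronization and the optimizer step are
    # deferred until `gradient_accumulation_steps` micro-batches have been seen.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()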
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType A : str = logging.get_logger(__name__) A : Union[str, Any] = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Optional[Any] = '''layoutlmv3''' def __init__( self : Tuple , __lowerCAmelCase : Optional[int]=5_02_65 , __lowerCAmelCase : Tuple=7_68 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : List[Any]=30_72 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=5_12 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Tuple=10_24 , __lowerCAmelCase : List[str]=1_28 , __lowerCAmelCase : Optional[int]=1_28 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Any=1_28 , __lowerCAmelCase : str=64 , __lowerCAmelCase : Optional[int]=2_56 , __lowerCAmelCase : int=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=2_24 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : Optional[Any] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = max_ad_position_embeddings A__ = coordinate_size A__ = shape_size A__ = has_relative_attention_bias A__ = rel_pos_bins A__ = max_rel_pos A__ = has_spatial_attention_bias A__ = rel_ad_pos_bins A__ = max_rel_ad_pos A__ = text_embed A__ = visual_embed A__ = input_size A__ = num_channels A__ = patch_size A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = version.parse('''1.12''' ) @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def a_ ( self : Optional[int] ) -> 
float: """simple docstring""" return 1e-5 @property def a_ ( self : Tuple ) -> int: """simple docstring""" return 12 def a_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A__ = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence A__ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes A__ = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
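A minimal instantiation sketch for the configuration above, assuming it is exported from `transformers` as `LayoutLMv3Config`; the printed values follow the defaults in the `__init__` signature.

from transformers import LayoutLMv3Config

config = LayoutLMv3Config()    # defaults come from the signature above
print(config.model_type)       # "layoutlmv3"
print(config.coordinate_size)  # 128
print(config.input_size)       # 224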
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
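A usage sketch for the processor above. The `openai/clip-vit-base-patch32` checkpoint name and the pairing with `CLIPModel` are conventional assumptions, not something this file pins down:

import torch
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")

image = Image.new("RGB", (224, 224))  # placeholder image
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image,
    return_tensors="pt", padding=True,
)

with torch.no_grad():
    outputs = model(**inputs)
probs = outputs.logits_per_image.softmax(dim=-1)  # image-text similarity scores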
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : str = '''▁''' A : Any = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } A : List[Any] = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } A : Tuple = { '''facebook/m2m100_418M''': 1_0_2_4, } # fmt: off A : Optional[int] = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = ['''input_ids''', '''attention_mask'''] __lowerCamelCase : List[int] = [] __lowerCamelCase : List[int] = [] def __init__( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]="<s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : Any="<unk>" , __lowerCAmelCase : Any="m2m100" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , __lowerCAmelCase : Dict=8 , **__lowerCAmelCase : Tuple , ) -> None: """simple docstring""" A__ = {} if sp_model_kwargs is None else sp_model_kwargs A__ = language_codes A__ = FAIRSEQ_LANGUAGE_CODES[language_codes] A__ = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code} A__ 
= kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__lowerCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = vocab_file A__ = load_json(__lowerCAmelCase ) A__ = {v: k for k, v in self.encoder.items()} A__ = spm_file A__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs ) A__ = len(self.encoder ) A__ = { self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase ) } A__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )} A__ = {v: k for k, v in self.lang_token_to_id.items()} A__ = src_lang if src_lang is not None else """en""" A__ = tgt_lang A__ = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) A__ = num_madeup_words @property def a_ ( self : Optional[int] ) -> int: """simple docstring""" return len(self.encoder ) + len(self.lang_token_to_id ) @property def a_ ( self : Optional[Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def a_ ( self : List[Any] , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a_ ( self : Optional[int] , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def a_ ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] ) def a_ ( self : Optional[int] , __lowerCAmelCase : int ) -> str: """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__lowerCAmelCase , self.unk_token ) def a_ ( self : Optional[int] , __lowerCAmelCase : Dict ) -> str: """simple docstring""" A__ = [] A__ = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token A__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def a_ ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) A__ = [1] * len(self.prefix_tokens ) A__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones def a_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + 
self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self : int ) -> Dict: """simple docstring""" A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : str , __lowerCAmelCase : Dict ) -> None: """simple docstring""" A__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A__ = {} A__ = load_spm(self.spm_file , self.sp_model_kwargs ) def a_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" A__ = Path(__lowerCAmelCase ) if not save_dir.is_dir(): raise OSError(f'{save_directory} should be a directory' ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __lowerCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCAmelCase , """wb""" ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (str(__lowerCAmelCase ), str(__lowerCAmelCase )) def a_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "en" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "ro" , **__lowerCAmelCase : List[Any] , ) -> BatchEncoding: """simple docstring""" A__ = src_lang A__ = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : Tuple ) -> Tuple: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) A__ = src_lang A__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase ) A__ = self.get_lang_id(__lowerCAmelCase ) A__ = tgt_lang_id return inputs def a_ ( self : Dict ) -> int: """simple docstring""" self.set_src_lang_special_tokens(self.src_lang ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang ) def a_ ( self : str , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Tuple , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> str: """simple docstring""" return self.lang_code_to_token[lang] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> int: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) return self.lang_token_to_id[lang_token] def __lowerCamelCase ( __a :str , 
__a :Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" A__ = sentencepiece.SentencePieceProcessor(**__a ) spm.Load(str(__a ) ) return spm def __lowerCamelCase ( __a :str ) -> Union[Dict, List]: """simple docstring""" with open(__a , """r""" ) as f: return json.load(__a ) def __lowerCamelCase ( __a :List[Any] , __a :str ) -> None: """simple docstring""" with open(__a , """w""" ) as f: json.dump(__a , __a , indent=2 )
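# A minimal usage sketch of the translation workflow implemented above, shown
# through the upstream `transformers.M2M100Tokenizer` (the class here is
# obfuscated as `A`); assumes the `facebook/m2m100_418M` checkpoint can be
# downloaded.
#
#   from transformers import M2M100Tokenizer
#
#   tok = M2M100Tokenizer.from_pretrained(
#       "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
#   )
#   enc = tok("Hello world", return_tensors="pt")
#   # input_ids start with the `__en__` language token and end with </s>,
#   # mirroring `set_src_lang_special_tokens` above.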
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
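# A minimal sketch of what the lazy-module pattern above buys: attributes are
# resolved on first access rather than at package-import time. Assumes an
# installed `transformers` with BioGPT support.
#
#   import transformers
#
#   config = transformers.BioGptConfig()  # triggers the lazy import
#   print(config.hidden_size)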
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next generation for a bounded grid of cells."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generate one greyscale image per generation, advancing the grid each frame."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
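# Quick sanity check, assuming only the definitions above: the blinker is a
# period-2 oscillator, so two applications of `new_generation` restore it.
if __name__ == "__main__":
    assert new_generation(new_generation(BLINKER)) == BLINKER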
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType A : Any = logging.get_logger(__name__) A : Union[str, Any] = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : str = '''imagegpt''' __lowerCamelCase : Union[str, Any] = ['''past_key_values'''] __lowerCamelCase : Optional[int] = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Any , __lowerCAmelCase : Optional[Any]=5_12 + 1 , __lowerCAmelCase : Union[str, Any]=32 * 32 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : List[Any]=24 , __lowerCAmelCase : Tuple=8 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Optional[int]="quick_gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=1e-5 , __lowerCAmelCase : List[str]=0.0_2 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : int=False , **__lowerCAmelCase : str , ) -> Tuple: """simple docstring""" A__ = vocab_size A__ = n_positions A__ = n_embd A__ = n_layer A__ = n_head A__ = n_inner A__ = activation_function A__ = resid_pdrop A__ = embd_pdrop A__ = attn_pdrop A__ = layer_norm_epsilon A__ = initializer_range A__ = scale_attn_weights A__ = use_cache A__ = scale_attn_by_inverse_layer_idx A__ = reorder_and_upcast_attn A__ = tie_word_embeddings super().__init__(tie_word_embeddings=__lowerCAmelCase , **__lowerCAmelCase ) class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def a_ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ] ) def a_ ( self : Optional[Any] , __lowerCAmelCase : "FeatureExtractionMixin" , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 32 , __lowerCAmelCase : int = 32 , ) -> Mapping[str, Any]: """simple docstring""" A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = dict(preprocessor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) ) return inputs
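# A minimal sketch of instantiating this configuration via its upstream name
# `ImageGPTConfig` (both classes above are obfuscated to `A`), assuming an
# installed `transformers`:
#
#   from transformers import ImageGPTConfig
#
#   cfg = ImageGPTConfig()  # defaults above: 512 + 1 colour clusters, 32 * 32 positions
#   print(cfg.n_embd, cfg.n_layer, cfg.n_head)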
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def __lowerCamelCase ( __a :List[Any] , __a :Optional[Any]=False ) -> Optional[Any]: """simple docstring""" A__ = OmegaConf.load(__a ) if display: print(yaml.dump(OmegaConf.to_container(__a ) ) ) return config def __lowerCamelCase ( __a :List[str] , __a :str=None , __a :int=None ) -> int: """simple docstring""" if conf_path is None: A__ = """./model_checkpoints/vqgan_only.yaml""" A__ = load_config(__a , display=__a ) A__ = VQModel(**config.model.params ) if ckpt_path is None: A__ = """./model_checkpoints/vqgan_only.pt""" A__ = torch.load(__a , map_location=__a ) if ".ckpt" in ckpt_path: A__ = sd["""state_dict"""] model.load_state_dict(__a , strict=__a ) model.to(__a ) del sd return model def __lowerCamelCase ( __a :str , __a :Optional[Any] ) -> Dict: """simple docstring""" A__ , A__ , A__ = model.encode(__a ) print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' ) A__ = model.decode(__a ) return xrec def __lowerCamelCase ( __a :int , __a :Dict=False ) -> Any: """simple docstring""" A__ , A__ = string.rsplit(""".""" , 1 ) if reload: A__ = importlib.import_module(__a ) importlib.reload(__a ) return getattr(importlib.import_module(__a , package=__a ) , cls ) def __lowerCamelCase ( __a :Optional[Any] ) -> str: """simple docstring""" if "target" not in config: raise KeyError("""Expected key `target` to instantiate.""" ) return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) ) def __lowerCamelCase ( __a :Tuple , __a :Union[str, Any] , __a :Dict=True , __a :str=True ) -> str: """simple docstring""" A__ = instantiate_from_config(__a ) if sd is not None: model.load_state_dict(__a ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def __lowerCamelCase ( __a :List[Any] , __a :Dict , __a :Tuple , __a :Tuple ) -> Optional[Any]: """simple docstring""" if ckpt: A__ = torch.load(__a , map_location="""cpu""" ) A__ = pl_sd["""global_step"""] print(F'loaded model from global step {global_step}.' ) else: A__ = {"""state_dict""": None} A__ = None A__ = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=__a , eval_mode=__a )["""model"""] return model, global_step
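# A usage sketch of the intended workflow. `load_config` is recoverable from
# an internal call site above; the other names (`load_vqgan`,
# `reconstruct_with_vqgan`) are hypothetical labels for the second and third
# functions, and the paths are the defaults hard-coded in this file, assumed
# to exist locally.
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   model = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml",
#                      ckpt_path="./model_checkpoints/vqgan_only.pt")
#   x_rec = reconstruct_with_vqgan(preprocessed_image, model)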
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __lowerCamelCase ( __a :List[str] , __a :List[Any] , __a :Union[str, Any] , __a :List[Any] ) -> Dict: """simple docstring""" A__ = multiprocessing.Manager() A__ = manager.list() A__ = multiprocessing.Process(target=__a , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __lowerCamelCase ( __a :Optional[Any] , __a :Any , __a :List[Any] ) -> Union[str, Any]: """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil A__ = shutil.rmtree A__ = os.rmdir A__ = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: A__ = {} with swallow_io(): with time_limit(__a ): exec(__a , __a ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(F'failed: {e}' ) # Needed for cleaning up. A__ = rmtree A__ = rmdir A__ = chdir @contextlib.contextmanager def __lowerCamelCase ( __a :List[str] ) -> Dict: """simple docstring""" def signal_handler(__a :List[Any] , __a :Optional[Any] ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , __a ) signal.signal(signal.SIGALRM , __a ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = WriteOnlyStringIO() with contextlib.redirect_stdout(__a ): with contextlib.redirect_stderr(__a ): with redirect_stdin(__a ): yield @contextlib.contextmanager def __lowerCamelCase ( ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(__a ): yield dirname class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A (io.StringIO ): '''simple docstring''' def a_ ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ) -> Dict: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[int] ) -> str: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Any ) -> int: """simple docstring""" raise OSError def a_ ( self : str , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> int: """simple docstring""" return False class A (contextlib._RedirectStream ): # type: ignore '''simple docstring''' __lowerCamelCase : Union[str, Any] = '''stdin''' @contextlib.contextmanager def __lowerCamelCase ( __a :Union[str, Any] ) -> List[str]: """simple docstring""" if root == ".": yield return A__ = os.getcwd() os.chdir(__a ) try: yield except BaseException as exc: raise exc finally: os.chdir(__a ) def __lowerCamelCase ( __a :Union[str, Any]=None ) -> Dict: """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , 
(maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins A__ = None A__ = None import os A__ = """1""" A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None import shutil A__ = None A__ = None A__ = None import subprocess A__ = None # type: ignore A__ = None import sys A__ = None A__ = None A__ = None A__ = None A__ = None
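# A minimal, self-contained sketch of the SIGALRM-based timeout technique used
# by the (obfuscated) `time_limit`-style context manager above; Unix-only,
# since it relies on `signal.setitimer`. This mirrors, rather than replaces,
# the sandboxing code in this file.
if __name__ == "__main__":

    class _Timeout(Exception):
        pass

    @contextlib.contextmanager
    def _time_limit(seconds: float):
        def _handler(signum, frame):
            raise _Timeout("Timed out!")

        signal.signal(signal.SIGALRM, _handler)
        signal.setitimer(signal.ITIMER_REAL, seconds)
        try:
            yield
        finally:
            signal.setitimer(signal.ITIMER_REAL, 0)

    try:
        with _time_limit(0.1):
            while True:  # busy loop that must be interrupted
                pass
    except _Timeout:
        print("timed out, as expected")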
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = (DEISMultistepScheduler,) __lowerCamelCase : Union[str, Any] = (('''num_inference_steps''', 25),) def a_ ( self : int , **__lowerCAmelCase : List[str] ) -> int: """simple docstring""" A__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """solver_order""": 2, } config.update(**__lowerCAmelCase ) return config def a_ ( self : List[Any] , __lowerCAmelCase : Any=0 , **__lowerCAmelCase : Optional[Any] ) -> int: """simple docstring""" A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**__lowerCAmelCase ) A__ = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) A__ = scheduler_class.from_pretrained(__lowerCAmelCase ) new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ = sample, sample for t in range(__lowerCAmelCase , time_step + scheduler.config.solver_order + 1 ): A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample A__ = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def a_ ( self : str ) -> List[Any]: """simple docstring""" pass def a_ ( self : Union[str, Any] , __lowerCAmelCase : Any=0 , **__lowerCAmelCase : Any ) -> Dict: """simple docstring""" A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**__lowerCAmelCase ) scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__lowerCAmelCase ) A__ = scheduler_class.from_pretrained(__lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample A__ = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def a_ ( self : Any , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : int ) -> Any: """simple docstring""" if scheduler is None: A__ = 
self.scheduler_classes[0] A__ = self.get_scheduler_config(**__lowerCAmelCase ) A__ = scheduler_class(**__lowerCAmelCase ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**__lowerCAmelCase ) A__ = scheduler_class(**__lowerCAmelCase ) A__ = 10 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): A__ = model(__lowerCAmelCase , __lowerCAmelCase ) A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample return sample def a_ ( self : List[str] ) -> Any: """simple docstring""" A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase ) for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**__lowerCAmelCase ) A__ = self.dummy_sample A__ = 0.1 * sample if num_inference_steps is not None and hasattr(__lowerCAmelCase , """set_timesteps""" ): scheduler.set_timesteps(__lowerCAmelCase ) elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , """set_timesteps""" ): A__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] A__ = dummy_past_residuals[: scheduler.config.solver_order] A__ = scheduler.timesteps[5] A__ = scheduler.timesteps[6] A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def a_ ( self : Optional[int] ) -> Tuple: """simple docstring""" A__ = DEISMultistepScheduler(**self.get_scheduler_config() ) A__ = self.full_loop(scheduler=__lowerCAmelCase ) A__ = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ = UniPCMultistepScheduler.from_config(scheduler.config ) A__ = DEISMultistepScheduler.from_config(scheduler.config ) A__ = self.full_loop(scheduler=__lowerCAmelCase ) A__ = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def a_ ( self : int ) -> List[Any]: """simple docstring""" for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase ) def a_ ( self : Tuple ) -> int: """simple docstring""" self.check_over_configs(thresholding=__lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , algorithm_type="""deis""" , solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , ) def a_ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase ) def a_ ( self : List[str] ) -> Any: """simple docstring""" for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , algorithm_type=__lowerCAmelCase , 
) A__ = self.full_loop( solver_order=__lowerCAmelCase , solver_type=__lowerCAmelCase , prediction_type=__lowerCAmelCase , algorithm_type=__lowerCAmelCase , ) assert not torch.isnan(__lowerCAmelCase ).any(), "Samples have nan numbers" def a_ ( self : Union[str, Any] ) -> Any: """simple docstring""" self.check_over_configs(lower_order_final=__lowerCAmelCase ) self.check_over_configs(lower_order_final=__lowerCAmelCase ) def a_ ( self : str ) -> Union[str, Any]: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=0 ) def a_ ( self : Dict ) -> Optional[Any]: """simple docstring""" A__ = self.full_loop() A__ = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def a_ ( self : List[Any] ) -> Dict: """simple docstring""" A__ = self.full_loop(prediction_type="""v_prediction""" ) A__ = torch.mean(torch.abs(__lowerCAmelCase ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3 def a_ ( self : Optional[Any] ) -> int: """simple docstring""" A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(thresholding=__lowerCAmelCase , dynamic_thresholding_ratio=0 ) A__ = scheduler_class(**__lowerCAmelCase ) A__ = 10 A__ = self.dummy_model() A__ = self.dummy_sample_deter.half() scheduler.set_timesteps(__lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): A__ = model(__lowerCAmelCase , __lowerCAmelCase ) A__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa
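# A minimal sketch of driving the scheduler under test outside this harness,
# assuming `diffusers` is installed; it mirrors the denoising loop in
# `full_loop` above, with zeros standing in for a trained UNet's output.
#
#   import torch
#   from diffusers import DEISMultistepScheduler
#
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(sample)  # stand-in for a UNet forward
#       sample = scheduler.step(model_output, t, sample).prev_sample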
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: A : Tuple = None A : Optional[Any] = logging.get_logger(__name__) A : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A : List[str] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } A : List[str] = { '''albert-base-v1''': 5_1_2, '''albert-large-v1''': 5_1_2, '''albert-xlarge-v1''': 5_1_2, '''albert-xxlarge-v1''': 5_1_2, '''albert-base-v2''': 5_1_2, '''albert-large-v2''': 5_1_2, '''albert-xlarge-v2''': 5_1_2, '''albert-xxlarge-v2''': 5_1_2, } A : Optional[int] = '''▁''' class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : str = VOCAB_FILES_NAMES __lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : List[str] = AlbertTokenizer def __init__( self : Tuple , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : str=False , __lowerCAmelCase : Union[str, Any]="[CLS]" , __lowerCAmelCase : int="[SEP]" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Dict="[SEP]" , __lowerCAmelCase : Union[str, Any]="<pad>" , __lowerCAmelCase : str="[CLS]" , __lowerCAmelCase : int="[MASK]" , **__lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" A__ = ( AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token ) super().__init__( __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , 
keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def a_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ): copyfile(self.vocab_file , __lowerCAmelCase ) return (out_vocab_file,)
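# A minimal usage sketch for the fast tokenizer above via its upstream name
# `AlbertTokenizerFast`; assumes the `albert-base-v2` checkpoint can be
# downloaded. It shows the [CLS] ... [SEP] layout that the
# `build_inputs_with_special_tokens` logic above produces.
#
#   from transformers import AlbertTokenizerFast
#
#   tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   ids = tok("Hello world")["input_ids"]
#   print(tok.convert_ids_to_tokens(ids))  # ['[CLS]', ..., '[SEP]']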
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check substring divisibility: d2d3d4 % 2, d3d4d5 % 3, ..., d8d9d10 % 17."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
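# Sanity check, assuming only the code above: 1406357289 is the example
# pandigital number with the substring-divisibility property given in
# Project Euler problem 43.
if __name__ == "__main__":
    assert is_substring_divisible(tuple(int(d) for d in "1406357289"))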
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A : Dict = logging.get_logger(__name__) def __lowerCamelCase ( __a :int=None , __a :Optional[Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__a ) @dataclass class A : '''simple docstring''' __lowerCamelCase : List[str] = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) __lowerCamelCase : List[int] = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) __lowerCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) __lowerCamelCase : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) __lowerCamelCase : str = field( default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) __lowerCamelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" warnings.warn( f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , __lowerCAmelCase , ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def a_ ( self : Tuple ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def a_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
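# A minimal sketch of the (deprecated) benchmark workflow these arguments
# feed; upstream the dataclass above is the base `BenchmarkArguments`, used
# through framework subclasses such as `PyTorchBenchmarkArguments`. Assumes
# `transformers` with PyTorch is installed.
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#   args = PyTorchBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#   )
#   results = PyTorchBenchmark(args).run()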
from manim import * class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' def a_ ( self : str ) -> Any: """simple docstring""" A__ = Rectangle(height=0.5 , width=0.5 ) A__ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) A__ = Rectangle(height=0.2_5 , width=0.2_5 ) A__ = [mem.copy() for i in range(6 )] A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = Text("""CPU""" , font_size=24 ) A__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCAmelCase ) A__ = [mem.copy() for i in range(4 )] A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = Text("""GPU""" , font_size=24 ) A__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCAmelCase ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = Text("""Model""" , font_size=24 ) A__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCAmelCase ) A__ = [] A__ = [] for i, rect in enumerate(__lowerCAmelCase ): A__ = fill.copy().set_fill(__lowerCAmelCase , opacity=0.8 ) target.move_to(__lowerCAmelCase ) model_arr.append(__lowerCAmelCase ) A__ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(__lowerCAmelCase ) self.add(*__lowerCAmelCase , *__lowerCAmelCase ) A__ = [meta_mem.copy() for i in range(6 )] A__ = [meta_mem.copy() for i in range(6 )] A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 ) A__ = Text("""Disk""" , font_size=24 ) A__ = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase ) disk.move_to([-4, -1.2_5, 0] ) self.add(__lowerCAmelCase , __lowerCAmelCase ) A__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A__ = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCAmelCase , __lowerCAmelCase ) A__ = MarkupText( f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__lowerCAmelCase ) A__ = MarkupText( f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase ) ) A__ = Square(0.3 ) input.set_fill(__lowerCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , __lowerCAmelCase , buff=0.5 ) self.play(Write(__lowerCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=__lowerCAmelCase , buff=0.0_2 ) self.play(MoveToTarget(__lowerCAmelCase ) ) self.play(FadeOut(__lowerCAmelCase ) ) A__ = Arrow(start=__lowerCAmelCase , end=__lowerCAmelCase , color=__lowerCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , __lowerCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) A__ = MarkupText( f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase , run_time=3 ) ) A__ = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.0_2} self.play( Write(__lowerCAmelCase ) , Circumscribe(model_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) A__ = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.0_2 , __lowerCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.0_2 ) A__ = AnimationGroup( FadeOut(__lowerCAmelCase , run_time=0.5 ) , MoveToTarget(__lowerCAmelCase , run_time=0.5 ) , FadeIn(__lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(__lowerCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: A__ = 0.7 self.play( Circumscribe(model_arr[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) A__ = a_c A__ = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 ) self.play( FadeOut(__lowerCAmelCase ) , FadeOut(__lowerCAmelCase , run_time=0.5 ) , ) A__ = MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCAmelCase , run_time=3 ) , MoveToTarget(__lowerCAmelCase ) ) self.wait()
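# Rendering sketch: a Manim scene like the one above is rendered from the
# command line; the module file name is an assumption, and `A` is the
# (obfuscated) scene class defined above.
#
#   manim -pql big_model_inference.py A
#
# `-p` previews the output when done and `-ql` renders at low quality.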
from math import ceil


def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n clockwise number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # The four corners of ring i are odd**2, odd**2 - even,
        # odd**2 - 2 * even and odd**2 - 3 * even, summing to
        # 4 * odd**2 - 6 * even.
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
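# Cross-check, assuming only the code above: for odd n the diagonal sum has
# the closed form (4n^3 + 3n^2 + 8n - 9) / 6, obtained by summing the
# per-ring corner total 4 * odd**2 - 6 * even; verified against the loop for
# a few small grids.
def _closed_form(n: int) -> int:
    return (4 * n**3 + 3 * n**2 + 8 * n - 9) // 6


if __name__ == "__main__":
    for k in (3, 5, 7, 1001):
        assert solution(k) == _closed_form(k)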
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) A : Tuple = logging.getLogger(__name__) def __lowerCamelCase ( __a :Optional[int] , __a :List[str] ) -> Tuple: """simple docstring""" A__ = np.argmax(__a , axis=1 ) return np.sum(outputs == labels ) def __lowerCamelCase ( __a :Tuple ) -> Dict: """simple docstring""" with open(__a , encoding="""utf_8""" ) as f: A__ = csv.reader(__a ) A__ = [] next(__a ) # skip the first line for line in tqdm(__a ): output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def __lowerCamelCase ( __a :Optional[int] , __a :List[Any] , __a :Dict , __a :Optional[Any] , __a :Optional[Any] , __a :int ) -> Union[str, Any]: """simple docstring""" A__ = [] for dataset in encoded_datasets: A__ = len(__a ) A__ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) A__ = np.zeros((n_batch, 2) , dtype=np.intaa ) A__ = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa ) A__ = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(__a ): A__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] A__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] A__ = with_conta A__ = with_conta A__ = len(__a ) - 1 A__ = len(__a ) - 1 A__ = with_conta A__ = with_conta A__ = mc_label A__ = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(__a ) for t in all_inputs ) ) return tensor_datasets def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = argparse.ArgumentParser() parser.add_argument("""--model_name""" , type=__a , default="""openai-gpt""" , help="""pretrained model name""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" ) parser.add_argument( """--output_dir""" , default=__a , type=__a , required=__a , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument("""--train_dataset""" , type=__a , default="""""" ) parser.add_argument("""--eval_dataset""" , type=__a , default="""""" ) parser.add_argument("""--seed""" , type=__a , default=4_2 ) parser.add_argument("""--num_train_epochs""" , type=__a , default=3 ) parser.add_argument("""--train_batch_size""" , type=__a , default=8 ) parser.add_argument("""--eval_batch_size""" , type=__a , default=1_6 ) parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__a , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , type=__a , default=1 ) parser.add_argument( """--max_steps""" , default=-1 , type=__a , help=( """If > 0: set total number of training steps to perform. 
Override num_train_epochs.""" ) , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__a , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--learning_rate""" , type=__a , default=6.25E-5 ) parser.add_argument("""--warmup_steps""" , default=0 , type=__a , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--lr_schedule""" , type=__a , default="""warmup_linear""" ) parser.add_argument("""--weight_decay""" , type=__a , default=0.01 ) parser.add_argument("""--lm_coef""" , type=__a , default=0.9 ) parser.add_argument("""--n_valid""" , type=__a , default=3_7_4 ) parser.add_argument("""--server_ip""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" ) A__ = parser.parse_args() print(__a ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) A__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A__ = torch.cuda.device_count() logger.info("""device: {}, n_gpu {}""".format(__a , __a ) ) if not args.do_train and not args.do_eval: raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset A__ = ["""_start_""", """_delimiter_""", """_classify_"""] A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(__a ) A__ = tokenizer.convert_tokens_to_ids(__a ) A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(__a ) ) model.to(__a ) # Load and encode the datasets def tokenize_and_encode(__a :Tuple ): if isinstance(__a , __a ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__a ) ) elif isinstance(__a , __a ): return obj return [tokenize_and_encode(__a ) for o in obj] logger.info("""Encoding dataset...""" ) A__ = load_rocstories_dataset(args.train_dataset ) A__ = load_rocstories_dataset(args.eval_dataset ) A__ = (train_dataset, eval_dataset) A__ = tokenize_and_encode(__a ) # Compute the max input length for the Transformer A__ = model.config.n_positions // 2 - 2 A__ = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) A__ = min(__a , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders A__ = pre_process_datasets(__a , __a , __a , *__a ) A__ , A__ = tensor_datasets[0], tensor_datasets[1] A__ = TensorDataset(*__a ) A__ = RandomSampler(__a ) A__ = DataLoader(__a , sampler=__a , batch_size=args.train_batch_size ) A__ = TensorDataset(*__a ) A__ = SequentialSampler(__a ) A__ = DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: A__ = args.max_steps A__ = args.max_steps // (len(__a 
) // args.gradient_accumulation_steps) + 1 else: A__ = len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs A__ = list(model.named_parameters() ) A__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""] A__ = [ { """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], """weight_decay""": args.weight_decay, }, {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0}, ] A__ = AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon ) A__ = get_linear_schedule_with_warmup( __a , num_warmup_steps=args.warmup_steps , num_training_steps=__a ) if args.do_train: A__ , A__ , A__ = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ): A__ = 0 A__ = 0 A__ = tqdm(__a , desc="""Training""" ) for step, batch in enumerate(__a ): A__ = tuple(t.to(__a ) for t in batch ) A__ , A__ , A__ , A__ = batch A__ = model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a ) A__ = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() A__ = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 A__ = """Training loss: {:.2e} lr: {:.2e}""".format(__a , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer A__ = model.module if hasattr(__a , """module""" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` A__ = os.path.join(args.output_dir , __a ) A__ = os.path.join(args.output_dir , __a ) torch.save(model_to_save.state_dict() , __a ) model_to_save.config.to_json_file(__a ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) A__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(__a ) if args.do_eval: model.eval() A__ , A__ = 0, 0 A__ , A__ = 0, 0 for batch in tqdm(__a , desc="""Evaluating""" ): A__ = tuple(t.to(__a ) for t in batch ) A__ , A__ , A__ , A__ = batch with torch.no_grad(): A__ , A__ , A__ , A__ = model( __a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a ) A__ = mc_logits.detach().cpu().numpy() A__ = mc_labels.to("""cpu""" ).numpy() A__ = accuracy(__a , __a ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 A__ = eval_loss / nb_eval_steps A__ = eval_accuracy / nb_eval_examples A__ = tr_loss / nb_tr_steps if args.do_train else None A__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss} A__ = os.path.join(args.output_dir , """eval_results.txt""" ) with open(__a , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , __a , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) if __name__ == "__main__": main()
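# Invocation sketch: the flags match the argparse definitions in main() above;
# the script name and dataset paths are placeholders (the CSVs are the
# ROCStories cloze-test files this loader expects).
#
#   python run_openai_gpt.py \
#     --model_name openai-gpt \
#     --do_train --do_eval \
#     --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#     --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#     --output_dir ../log \
#     --train_batch_size 16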
274
1
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class A : '''simple docstring''' @property def a_ ( self : int ) -> Optional[Any]: """simple docstring""" return self.get_dummy_input() @property def a_ ( self : Union[str, Any] ) -> Any: """simple docstring""" if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' ) def a_ ( self : Any , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : str=False , __lowerCAmelCase : str=False , __lowerCAmelCase : str=False , ) -> Tuple: """simple docstring""" A__ = 4 A__ = 32 A__ = (32, 32) A__ = torch.manual_seed(0 ) A__ = torch.device(__lowerCAmelCase ) A__ = (batch_size, num_channels) + sizes A__ = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase ) A__ = {"""hidden_states""": hidden_states} if include_temb: A__ = 1_28 A__ = randn_tensor((batch_size, temb_channels) , generator=__lowerCAmelCase , device=__lowerCAmelCase ) if include_res_hidden_states_tuple: A__ = torch.manual_seed(1 ) A__ = (randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=__lowerCAmelCase ),) if include_encoder_hidden_states: A__ = floats_tensor((batch_size, 32, 32) ).to(__lowerCAmelCase ) if include_skip_sample: A__ = randn_tensor(((batch_size, 3) + sizes) , generator=__lowerCAmelCase , device=__lowerCAmelCase ) return dummy_input def a_ ( self : Optional[int] ) -> int: """simple docstring""" A__ = { """in_channels""": 32, """out_channels""": 32, """temb_channels""": 1_28, } if self.block_type == "up": A__ = 32 if self.block_type == "mid": init_dict.pop("""out_channels""" ) A__ = self.dummy_input return init_dict, inputs_dict def a_ ( self : Tuple , __lowerCAmelCase : Optional[Any] ) -> Optional[int]: """simple docstring""" A__ , A__ = self.prepare_init_args_and_inputs_for_common() A__ = self.block_class(**__lowerCAmelCase ) unet_block.to(__lowerCAmelCase ) unet_block.eval() with torch.no_grad(): A__ = unet_block(**__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): A__ = output[0] self.assertEqual(output.shape , self.output_shape ) A__ = output[0, -1, -3:, -3:] A__ = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase ) assert torch_all_close(output_slice.flatten() , __lowerCAmelCase , atol=5e-3 ) @unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" ) def a_ ( self : Tuple ) -> Any: """simple docstring""" A__ , A__ = self.prepare_init_args_and_inputs_for_common() A__ = self.block_class(**__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.train() A__ = model(**__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): A__ = output[0] A__ = torch.device(__lowerCAmelCase ) A__ = randn_tensor(output.shape , device=__lowerCAmelCase ) A__ = torch.nn.functional.mse_loss(__lowerCAmelCase , __lowerCAmelCase ) loss.backward()
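# The dummy inputs above get their determinism from a seeded generator passed
# to randn_tensor. The same pattern with plain torch.randn (randn_tensor is a
# diffusers helper; the shapes here are illustrative):
import torch

hidden_states = torch.randn((4, 32, 32, 32), generator=torch.manual_seed(0))
regenerated = torch.randn((4, 32, 32, 32), generator=torch.manual_seed(0))
assert torch.equal(hidden_states, regenerated)  # same seed -> same tensor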
274
import argparse from collections import defaultdict import yaml A : str = '''docs/source/en/_toctree.yml''' def __lowerCamelCase ( __a :str ) -> List[Any]: """simple docstring""" A__ = defaultdict(__a ) A__ = [] A__ = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(__a ) A__ = new_doc_list A__ = [key for key, value in counts.items() if value > 1] A__ = [] for duplicate_key in duplicates: A__ = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(__a ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) A__ = sorted(__a , key=lambda __a : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__a ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(__a ) # Sort return overview_doc def __lowerCamelCase ( __a :Any=False ) -> List[str]: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 A__ = api_doc[scheduler_idx]["""sections"""] A__ = clean_doc_toc(__a ) A__ = False if new_scheduler_doc != scheduler_doc: A__ = True if overwrite: A__ = new_scheduler_doc if diff: if overwrite: A__ = api_doc with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def __lowerCamelCase ( __a :Optional[int]=False ) -> Dict: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 A__ = False A__ = api_doc[pipeline_idx]["""sections"""] A__ = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: A__ = pipeline_doc["""section"""] A__ = clean_doc_toc(__a ) if overwrite: A__ = new_sub_pipeline_doc new_pipeline_docs.append(__a ) # sort overall pipeline doc A__ = clean_doc_toc(__a ) if new_pipeline_docs != pipeline_docs: A__ = True if overwrite: A__ = new_pipeline_docs if diff: if overwrite: A__ = api_doc with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') A : Optional[Any] = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) 
check_pipeline_doc(args.fix_and_overwrite)
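# The heart of clean_doc_toc above: collapse duplicate "local" keys and sort
# by title with the overview entry pinned first. A self-contained sketch on
# made-up sample data:
docs = [
    {"local": "pipe_b", "title": "Pipeline B"},
    {"local": "overview", "title": "Overview"},
    {"local": "pipe_a", "title": "Pipeline A"},
    {"local": "pipe_a", "title": "Pipeline A"},  # duplicate to be collapsed
]
deduped = list({doc["local"]: doc for doc in docs}.values())
overview = [d for d in deduped if d["title"].lower() == "overview"]
rest = sorted(
    (d for d in deduped if d["title"].lower() != "overview"),
    key=lambda d: d["title"].lower(),
)
print(overview + rest)  # Overview, Pipeline A, Pipeline B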
274
1
import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values A : List[str] = argparse.ArgumentParser() parser.add_argument('''--user''', type=str, default='''ubuntu''') parser.add_argument('''--host''', type=str, default='''localhost''') parser.add_argument('''--key_path''', type=str, default=None) parser.add_argument('''--instance''', type=str, default='''V100:1''') parser.add_argument('''--provider''', type=str, default='''cheapest''') parser.add_argument('''--use_spot''', type=bool, default=False) parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''') A, A : Optional[Any] = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('''Cannot specify both BYO and on-demand cluster args''') A : Optional[Any] = rh.cluster( name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path} ) else: A : Optional[int] = rh.cluster( name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) A : Optional[Any] = args.example.rsplit('''/''', 1)[0] # Set up remote environment cluster.install_packages(['''pip:./''']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117''']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
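# The last cluster.run call above shell-quotes pass-through CLI args with
# shlex.quote. A stdlib-only illustration of why that matters (the sample
# argument is made up):
import shlex

extra_args = ["--prompt", "hello world; rm -rf /"]
print(" ".join(shlex.quote(a) for a in extra_args))
# --prompt 'hello world; rm -rf /'  <- the risky argument stays one quoted token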
274
def prefix_function(input_string: str) -> list:
    """Knuth-Morris-Pratt prefix function: result[i] is the length of the
    longest proper prefix of input_string that is also a suffix of
    input_string[: i + 1]."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Length of the longest proper prefix that also occurs elsewhere in the string."""
    # default=0 guards against the empty string, where max() would raise
    return max(prefix_function(input_string), default=0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
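# Example usage of the functions above; the values are easy to verify by hand.
# For "aabcdaabc" the final entry is 4 because the 4-character prefix "aabc"
# reappears as the suffix.
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4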
274
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging A : str = logging.get_logger(__name__) A : Union[str, Any] = {'''vocab_file''': '''spiece.model'''} A : Any = { '''vocab_file''': { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''', } } A : List[str] = { '''xlnet-base-cased''': None, '''xlnet-large-cased''': None, } # Segments (not really needed) A : Dict = 0 A : Any = 1 A : str = 2 A : Optional[int] = 3 A : str = 4 class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = VOCAB_FILES_NAMES __lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : int = '''left''' def __init__( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : int="<s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : Optional[Any]="<unk>" , __lowerCAmelCase : Union[str, Any]="<sep>" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : Union[str, Any]="<cls>" , __lowerCAmelCase : Optional[Any]="<mask>" , __lowerCAmelCase : Dict=["<eop>", "<eod>"] , __lowerCAmelCase : Optional[Dict[str, Any]] = None , **__lowerCAmelCase : int , ) -> None: """simple docstring""" A__ = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , ) A__ = 3 A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @property def a_ ( self : List[Any] ) -> Tuple: """simple docstring""" return len(self.sp_model ) def a_ ( self : str ) -> str: """simple docstring""" A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ) -> str: """simple docstring""" A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : str , __lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" A__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a_ ( self : List[Any] , __lowerCAmelCase : int ) -> str: """simple docstring""" if self.remove_space: A__ = """ """.join(inputs.strip().split() ) else: A__ = inputs A__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: A__ = unicodedata.normalize("""NFKD""" , __lowerCAmelCase ) A__ = """""".join([c for c in outputs if not 
unicodedata.combining(__lowerCAmelCase )] ) if self.do_lower_case: A__ = outputs.lower() return outputs def a_ ( self : List[Any] , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" A__ = self.preprocess_text(__lowerCAmelCase ) A__ = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) A__ = [] for piece in pieces: if len(__lowerCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): A__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A__ = cur_pieces[1:] else: A__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__lowerCAmelCase ) else: new_pieces.append(__lowerCAmelCase ) return new_pieces def a_ ( self : Tuple , __lowerCAmelCase : Dict ) -> List[Any]: """simple docstring""" return self.sp_model.PieceToId(__lowerCAmelCase ) def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] ) -> Dict: """simple docstring""" return self.sp_model.IdToPiece(__lowerCAmelCase ) def a_ ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" A__ = """""".join(__lowerCAmelCase ).replace(__lowerCAmelCase , """ """ ).strip() return out_string def a_ ( self : int , __lowerCAmelCase : List[int] , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = None , __lowerCAmelCase : bool = True , **__lowerCAmelCase : int , ) -> str: """simple docstring""" A__ = kwargs.pop("""use_source_tokenizer""" , __lowerCAmelCase ) A__ = self.convert_ids_to_tokens(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 A__ = [] A__ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowerCAmelCase ) ) A__ = [] sub_texts.append(__lowerCAmelCase ) else: current_sub_text.append(__lowerCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowerCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens A__ = """""".join(__lowerCAmelCase ) A__ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A__ = self.clean_up_tokenization(__lowerCAmelCase ) return clean_text else: return text def a_ ( self : Optional[int] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def a_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is not None: return ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1, 1] return ([0] * len(__lowerCAmelCase )) + [1, 1] def a_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def a_ ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , """wb""" ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
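# XLNet appends <sep> and <cls> at the END of the sequence, unlike BERT (see
# build_inputs_with_special_tokens / create_token_type_ids_from_sequences
# above). A sketch of the resulting layout; the token ids are illustrative:
token_ids_a = [11, 12, 13]
token_ids_b = [21, 22]
sep, cls = [5], [3]  # hypothetical ids for <sep> and <cls>
input_ids = token_ids_a + sep + token_ids_b + sep + cls
token_type_ids = len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] + [2]
assert input_ids == [11, 12, 13, 5, 21, 22, 5, 3]
assert token_type_ids == [0, 0, 0, 0, 1, 1, 1, 2]  # [2] marks the <cls> segment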
274
def solution(limit: int = 1_000_000) -> int:
    """Count the values of n below `limit` for which x^2 - y^2 - z^2 = n
    (with x, y, z positive integers in arithmetic progression) has exactly
    ten distinct solutions."""
    limit_plus_one = limit + 1
    frequency = [0] * limit_plus_one
    for first_term in range(1, limit_plus_one):
        for n in range(first_term, limit_plus_one, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
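# A worked check of the inner condition above for one (first_term, n) pair:
# with first_term = 4 and n = 48, common_difference = 4 + 48 / 4 = 16, which is
# divisible by 4 and becomes 4 after the division; first_term (4) is then not
# strictly greater than it, so the positivity check rejects this pair.
first_term, n = 4, 48
common_difference = first_term + n / first_term
assert common_difference % 4 == 0
common_difference /= 4
assert not (first_term > common_difference and first_term < 4 * common_difference)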
274
1
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Graph whose edge weights are transition probabilities of a Markov chain."""

    def __init__(self) -> None:
        self.connections: dict[str, dict[str, float]] = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, nodea: str, nodeb: str, probability: float) -> None:
        if nodea not in self.connections:
            self.add_node(nodea)
        if nodeb not in self.connections:
            self.add_node(nodeb)
        self.connections[nodea][nodeb] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0.0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(
    start: str, transitions: list[tuple[str, str, float]], steps: int
) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for nodea, nodeb, probability in transitions:
        graph.add_transition_probability(nodea, nodeb, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
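# Example run of the chain above on a tiny two-node graph. The transition
# probabilities and step count are illustrative; exact counts vary between
# runs because transition() draws from random().
transitions = [
    ("a", "a", 0.9),
    ("a", "b", 0.1),
    ("b", "a", 0.5),
    ("b", "b", 0.5),
]
visited = get_transitions("a", transitions, 1_000)
print(visited)  # e.g. Counter({'a': 836, 'b': 166}) -- "a" dominates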
274
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels (0 = highest); each level holds at most 100 items."""

    def __init__(self) -> None:
        self.queues: list[list[int]] = [[], [], []]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The element itself is its priority; smaller values are dequeued first."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    for _ in range(5):
        print(fpq.dequeue())
    print(fpq)
    for _ in range(4):
        print(fpq.dequeue())
    # a further dequeue() here would raise UnderFlowError: all queues are empty


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    for value in (10, 70, 100, 1, 5, 7, 4, 64, 128):
        epq.enqueue(value)
    print(epq)
    for _ in range(5):
        print(epq.dequeue())
    print(epq)
    for _ in range(4):
        print(epq.dequeue())
    # a further dequeue() here would raise UnderFlowError: the queue is empty


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
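# For comparison: the standard library's heapq gives the same smallest-first
# behavior as ElementPriorityQueue with O(log n) pops instead of the O(n)
# min()/remove() pair used above.
import heapq

heap: list[int] = []
for value in (10, 70, 100, 1, 5, 7, 4, 64, 128):
    heapq.heappush(heap, value)
print([heapq.heappop(heap) for _ in range(3)])  # [1, 4, 5]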
274
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : int = { '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Any = '''gpt_neo''' __lowerCamelCase : Dict = ['''past_key_values'''] __lowerCamelCase : str = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : int , __lowerCAmelCase : Union[str, Any]=5_02_57 , __lowerCAmelCase : Tuple=20_48 , __lowerCAmelCase : Tuple=20_48 , __lowerCAmelCase : Union[str, Any]=24 , __lowerCAmelCase : List[Any]=[[["global", "local"], 12]] , __lowerCAmelCase : Dict=16 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Optional[Any]=2_56 , __lowerCAmelCase : str="gelu_new" , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : str=0.0 , __lowerCAmelCase : Dict=0.0 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : int=1e-5 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=5_02_56 , __lowerCAmelCase : Dict=5_02_56 , **__lowerCAmelCase : List[Any] , ) -> Dict: """simple docstring""" A__ = vocab_size A__ = max_position_embeddings A__ = hidden_size A__ = num_layers A__ = num_heads A__ = intermediate_size A__ = window_size A__ = activation_function A__ = resid_dropout A__ = embed_dropout A__ = attention_dropout A__ = classifier_dropout A__ = layer_norm_epsilon A__ = initializer_range A__ = use_cache A__ = bos_token_id A__ = eos_token_id A__ = attention_types A__ = self.expand_attention_types_params(__lowerCAmelCase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.attention_layers)` == `config.num_layers` """ f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' f'`config.num_layers = {self.num_layers}`. ' """`config.attention_layers` is prepared using `config.attention_types`. 
""" """Please verify the value of `config.attention_types` argument.""" ) super().__init__(bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) @staticmethod def a_ ( __lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" A__ = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __lowerCamelCase ( __a :Union[str, Any] , __a :List[str] , __a :List[str] , __a :List[Any] ) -> Optional[Any]: """simple docstring""" import torch A__ = input.size() A__ = len(__a ) A__ = shape[dimension] A__ = torch.arange(0 , __a , __a ) A__ = torch.div(sizedim - size , __a , rounding_mode="""floor""" ) + 1 A__ = torch.arange(__a ) + low_indices[:min_length][:, None] A__ = [slice(__a )] * rank A__ = indices A__ = input[s] A__ = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__a ) def __lowerCamelCase ( __a :Optional[int] , __a :int ) -> Dict: """simple docstring""" import torch A__ = torch.arange(1 , __a ) A__ = torch.remainder(__a , __a ) A__ = remainders == 0 A__ = candidates[divisor_indices] A__ = torch.max(__a ) return largest_divisor, torch.div(__a , __a , rounding_mode="""floor""" ) class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def a_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" A__ = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(__lowerCAmelCase , direction="""inputs""" ) A__ = {0: """batch""", 1: """past_sequence + sequence"""} else: A__ = {0: """batch""", 1: """sequence"""} return common_inputs @property def a_ ( self : Union[str, Any] ) -> int: """simple docstring""" return self._config.num_heads def a_ ( self : Tuple , __lowerCAmelCase : PreTrainedTokenizer , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" A__ = super(__lowerCAmelCase , self ).generate_dummy_inputs( __lowerCAmelCase , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , is_pair=__lowerCAmelCase , framework=__lowerCAmelCase ) # We need to order the input in the way they appears in the forward() A__ = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch A__ , A__ = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values A__ = seqlen + 2 A__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) A__ = [ (torch.zeros(__lowerCAmelCase ), torch.zeros(__lowerCAmelCase )) for _ in range(self.num_layers ) ] A__ = common_inputs["""attention_mask"""] if self.use_past: A__ = ordered_inputs["""attention_mask"""].dtype A__ = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(__lowerCAmelCase , __lowerCAmelCase , dtype=__lowerCAmelCase )] , dim=1 ) return ordered_inputs @property def a_ ( self : Dict ) -> int: """simple docstring""" return 13
274
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = tempfile.mkdtemp() # fmt: off A__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on A__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) A__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] A__ = {"""unk_token""": """<unk>"""} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__lowerCAmelCase ) ) A__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } A__ = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Tuple , **__lowerCAmelCase : Dict ) -> str: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : Union[str, Any] , **__lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : List[str] , **__lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : str ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a_ ( self : str ) -> Any: """simple docstring""" A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a_ ( self : Optional[int] ) -> Tuple: """simple docstring""" A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = self.get_image_processor() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) 
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) A__ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def a_ ( self : List[Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = self.prepare_image_inputs() A__ = image_processor(__lowerCAmelCase , return_tensors="""np""" ) A__ = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a_ ( self : Optional[Any] ) -> Any: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = processor(text=__lowerCAmelCase ) A__ = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def a_ ( self : Tuple ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.batch_decode(__lowerCAmelCase ) A__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Optional[int] ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = 
processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
274
1
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) A : List[str] = logging.get_logger(__name__) A : Optional[Any] = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', 
'''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) A : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def __lowerCamelCase ( __a :str ) -> int: """simple docstring""" for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: A__ = model_type_to_module_name(__a ) A__ = importlib.import_module(F'.{module_name}' , """transformers.models""" ) try: return getattr(__a , __a ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(__a , """__name__""" , __a ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. A__ = importlib.import_module("""transformers""" ) if hasattr(__a , __a ): return getattr(__a , __a ) return None def __lowerCamelCase ( __a :Union[str, os.PathLike] , __a :Optional[Union[str, os.PathLike]] = None , __a :bool = False , __a :bool = False , __a :Optional[Dict[str, str]] = None , __a :Optional[Union[bool, str]] = None , __a :Optional[str] = None , __a :bool = False , **__a :Tuple , ) -> List[str]: """simple docstring""" A__ = get_file_from_repo( __a , __a , cache_dir=__a , force_download=__a , resume_download=__a , proxies=__a , use_auth_token=__a , revision=__a , local_files_only=__a , ) if resolved_config_file is None: logger.info( """Could not locate the image processor configuration file, will try to use the model config instead.""" ) return {} with open(__a , encoding="""utf-8""" ) as reader: return json.load(__a ) class A : '''simple docstring''' def __init__( self : int ) -> Dict: """simple docstring""" raise EnvironmentError( """AutoImageProcessor is designed to be instantiated """ """using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" ) @classmethod @replace_list_option_in_docstrings(__lowerCAmelCase ) def a_ ( cls : str , __lowerCAmelCase : str , **__lowerCAmelCase : Dict ) -> Any: """simple docstring""" A__ = kwargs.pop("""config""" , __lowerCAmelCase ) A__ = kwargs.pop("""trust_remote_code""" , __lowerCAmelCase ) A__ = True A__ , A__ = ImageProcessingMixin.get_image_processor_dict(__lowerCAmelCase , **__lowerCAmelCase ) A__ = config_dict.get("""image_processor_type""" , __lowerCAmelCase ) A__ = None if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ): A__ = config_dict["""auto_map"""]["""AutoImageProcessor"""] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: A__ = config_dict.pop("""feature_extractor_type""" , __lowerCAmelCase ) if feature_extractor_class is not None: logger.warning( """Could not find image processor class in the image processor config or the model config. 
Loading""" """ based on pattern matching with the model's feature extractor configuration.""" ) A__ = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" ) if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ): A__ = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] A__ = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" ) logger.warning( """Could not find image processor auto map in the image processor config or the model config.""" """ Loading based on pattern matching with the model's feature extractor configuration.""" ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): A__ = AutoConfig.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase ) # It could be in `config.image_processor_type`` A__ = getattr(__lowerCAmelCase , """image_processor_type""" , __lowerCAmelCase ) if hasattr(__lowerCAmelCase , """auto_map""" ) and "AutoImageProcessor" in config.auto_map: A__ = config.auto_map["""AutoImageProcessor"""] if image_processor_class is not None: A__ = image_processor_class_from_name(__lowerCAmelCase ) A__ = image_processor_auto_map is not None A__ = image_processor_class is not None or type(__lowerCAmelCase ) in IMAGE_PROCESSOR_MAPPING A__ = resolve_trust_remote_code( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if has_remote_code and trust_remote_code: A__ = get_class_from_dynamic_module( __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) A__ = kwargs.pop("""code_revision""" , __lowerCAmelCase ) if os.path.isdir(__lowerCAmelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(__lowerCAmelCase ) in IMAGE_PROCESSOR_MAPPING: A__ = IMAGE_PROCESSOR_MAPPING[type(__lowerCAmelCase )] return image_processor_class.from_dict(__lowerCAmelCase , **__lowerCAmelCase ) raise ValueError( f'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ' f'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ' f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' ) @staticmethod def a_ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str ) -> Dict: """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(__lowerCAmelCase , __lowerCAmelCase )
274
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
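# The process-per-element version above mirrors the sequential odd-even
# transposition sort, which makes the pipe wiring easier to follow. A minimal
# sequential sketch of the same algorithm:
def odd_even_transposition_sequential(arr: list) -> list:
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))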
274
1
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left : right + 1] (inclusive bounds)."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
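# Example usage of find_max above; left and right are inclusive index bounds.
nums = [3, -7, 12, 0, 5]
assert find_max(nums, 0, len(nums) - 1) == 12
assert find_max(nums, 1, 3) == 12  # max over nums[1:4]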
274
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    # weight shape (vocab_size, emb_size) matches the embedding matrix
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
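# make_linear_from_emb above ties the LM head to the input embeddings by
# sharing one weight tensor. A minimal sketch of that weight-tying trick
# (the sizes are illustrative):
import torch
from torch import nn

emb = nn.Embedding(100, 16)               # vocab_size x d_model
lm_head = nn.Linear(16, 100, bias=False)  # weight shape (100, 16) matches emb
lm_head.weight.data = emb.weight.data     # share storage
assert lm_head.weight.data_ptr() == emb.weight.data_ptr()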
274
1
import os import jsonlines import numpy as np from tqdm import tqdm A : Tuple = 2_0_4_8 A : Union[str, Any] = 4_0_9_6 A : int = 4_2 A : Any = os.environ.pop('''PROCESS_TRAIN''', '''false''') A : Dict = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4} def __lowerCamelCase ( __a :Optional[Any] ) -> Dict: """simple docstring""" def choose_first(__a :int , __a :List[str]=False ): assert isinstance(__a , __a ) if len(__a ) == 1: A__ = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: A__ = {k: [a[k]] for k in a} if len(a["""start_token"""] ) > 0: break return a A__ = {"""id""": example["""id"""]} A__ = example["""annotations"""] A__ = annotation["""yes_no_answer"""] if 0 in yes_no_answer or 1 in yes_no_answer: A__ = ["""yes"""] if 1 in yes_no_answer else ["""no"""] A__ = A__ = [] A__ = A__ = [] A__ = ["""<cls>"""] else: A__ = ["""short"""] A__ = choose_first(annotation["""short_answers"""] ) if len(out["""start_token"""] ) == 0: # answer will be long if short is not available A__ = ["""long"""] A__ = choose_first(annotation["""long_answer"""] , is_long_answer=__a ) A__ = [] answer.update(__a ) # disregard some samples if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]: A__ = True else: A__ = False A__ = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""] if not all(isinstance(answer[k] , __a ) for k in cols ): raise ValueError("""Issue in ID""" , example["""id"""] ) return answer def __lowerCamelCase ( __a :Union[str, Any] , __a :Tuple=False ) -> Union[str, Any]: """simple docstring""" A__ = _get_single_answer(__a ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element A__ = example["""document"""]["""tokens"""] A__ = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) return { "context": " ".join(__a ), "answer": { "start_token": -1_0_0, # ignore index in cross-entropy "end_token": -1_0_0, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples A__ = ["""start_token""", """end_token"""] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 A__ = example["""document"""]["""tokens"""] A__ = answer["""start_token"""] A__ = answer["""end_token"""] A__ = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 A__ = """ """.join(context[start_token:end_token] ) # checking above code if assertion: A__ = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]] A__ = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]] A__ = """ """.join([old[i] for i in range(len(__a ) ) if not is_html[i]] ) if new != old: print("""ID:""" , example["""id"""] ) print("""New:""" , __a , end="""\n""" ) print("""Old:""" , __a , end="""\n\n""" ) return { "context": " ".join(__a ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def __lowerCamelCase ( __a :Tuple , __a :Optional[Any] , __a :List[str]=2_0_4_8 , __a :str=4_0_9_6 , __a :Any=True ) -> str: """simple docstring""" A__ = get_context_and_ans(__a , assertion=__a ) A__ = out["""answer"""] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } A__ = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids A__ = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element A__ = [] A__ = [] A__ = input_ids[:q_len] A__ = range(__a , len(__a ) , max_length - doc_stride ) for i in doc_start_indices: A__ = i + max_length - q_len A__ = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer["""category"""][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-1_0_0] * len(__a ), "end_token": [-1_0_0] * len(__a ), "category": category, }, } A__ = out["""context"""].split() A__ = splitted_context[answer["""end_token"""]] A__ = len( tokenizer( """ """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=__a , ).input_ids ) A__ = len( tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=__a ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token A__ = len(tokenizer(__a , add_special_tokens=__a ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 A__ = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive A__ = answer["""start_token"""] A__ = answer["""end_token"""] if assertion: A__ = tokenizer.decode(__a ) if answer["span"] != new: print("""ISSUE IN TOKENIZATION""" ) print("""OLD:""" , answer["""span"""] ) print("""NEW:""" , __a , end="""\n\n""" ) if len(__a ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } A__ = input_ids[:q_len] A__ = range(__a , len(__a ) , max_length - doc_stride ) A__ = [] A__ = [] A__ = [] A__ = [] # null, yes, no, long, short for i in doc_start_indices: A__ = i + max_length - q_len A__ = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if 
start_token >= i and end_token <= end_index - 1: A__ = start_token - i + q_len A__ = end_token - i + q_len answers_category.append(answer["""category"""][0] ) # ["short"] -> "short" else: A__ = -1_0_0 A__ = -1_0_0 answers_category.append("""null""" ) A__ = inputs[-1][start_token : end_token + 1] answers_start_token.append(__a ) answers_end_token.append(__a ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print("""ISSUE in strided for ID:""" , example["""id"""] ) print("""New:""" , tokenizer.decode(__a ) ) print("""Old:""" , tokenizer.decode(__a ) , end="""\n\n""" ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def __lowerCamelCase ( __a :int , __a :Any , __a :Optional[int]=2_0_4_8 , __a :Dict=4_0_9_6 , __a :int=False ) -> Union[str, Any]: """simple docstring""" A__ = get_strided_contexts_and_ans( __a , __a , doc_stride=__a , max_length=__a , assertion=__a , ) return example def __lowerCamelCase ( __a :List[str] , __a :Any ) -> Dict: """simple docstring""" with jsonlines.open(__a , """a""" ) as writer: for example in tqdm(__a , total=len(__a ) , desc="""Saving samples ... """ ): A__ = example["""labels"""] for ids, start, end, cat in zip( example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { """input_ids""": ids, """start_token""": start, """end_token""": end, """category""": CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer A : Optional[int] = load_dataset('''natural_questions''') A : Tuple = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''') A : Optional[int] = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation'''] A : Tuple = { '''tokenizer''': tokenizer, '''doc_stride''': DOC_STRIDE, '''max_length''': MAX_LENGTH, '''assertion''': False, } A : Any = data.map(prepare_inputs, fn_kwargs=fn_kwargs) A : Union[str, Any] = data.remove_columns(['''annotations''', '''document''', '''id''', '''question''']) print(data) np.random.seed(SEED) A : Optional[int] = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl''' save_to_disk(data, file_name=cache_file_name)
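# The doc_start_indices computation above slices a long tokenized context into
# overlapping windows so an answer near a window boundary still falls wholly
# inside at least one window. A sketch with illustrative numbers:
q_len, max_length, doc_stride, n_tokens = 20, 512, 128, 2_000
doc_start_indices = list(range(q_len, n_tokens, max_length - doc_stride))
print(doc_start_indices[:4])  # [20, 404, 788, 1172]
# each window holds max_length - q_len = 492 context tokens, so consecutive
# windows overlap by doc_stride - q_len = 108 tokens here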
274
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class A (unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : Tuple=4 , ) -> Dict:
        """simple docstring"""
        A__ = parent
        A__ = batch_size
        A__ = seq_length
        A__ = is_training
        A__ = use_attention_mask
        A__ = use_token_type_ids
        A__ = use_labels
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = type_vocab_size
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = num_choices

    def a_ ( self : Any ) -> str:
        """simple docstring"""
        A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        A__ = None
        if self.use_attention_mask:
            A__ = random_attention_mask([self.batch_size, self.seq_length] )

        A__ = None
        if self.use_token_type_ids:
            A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        A__ = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )

        return config, input_ids, token_type_ids, attention_mask

    def a_ ( self : Optional[int] ) -> Optional[int]:
        """simple docstring"""
        A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ = config_and_inputs
        A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict


@require_flax
class A (SCREAMING_SNAKE_CASE , unittest.TestCase ):
    '''simple docstring'''

    __lowerCamelCase : str = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def a_ ( self : str ) -> Optional[int]:
        """simple docstring"""
        A__ = FlaxAlbertModelTester(self )

    @slow
    def a_ ( self : int ) -> Tuple:
        """simple docstring"""
        for
model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained("""albert-base-v2""" ) A__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCAmelCase ) @require_flax class A (unittest.TestCase ): '''simple docstring''' @slow def a_ ( self : Dict ) -> List[Any]: """simple docstring""" A__ = FlaxAlbertModel.from_pretrained("""albert-base-v2""" ) A__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) A__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] A__ = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCAmelCase ) A__ = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
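# A numpy-only stand-in (no jax required) for the slice-comparison pattern in
# the integration test above: check a few entries of a large output tensor
# against hard-coded reference values within a tolerance. The numbers below are
# made up for illustration.
import numpy as np

output = np.zeros((1, 11, 768), dtype=np.float32)
output[0, 1:4, 1:4] = [[-0.65, 1.50, -0.27]] * 3  # pretend model output
expected_slice = np.array([[[-0.65, 1.50, -0.27]] * 3])
assert output.shape == (1, 11, 768)
assert np.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)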
274
1
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def __lowerCamelCase ( __a :Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]: """simple docstring""" A__ = [] A__ = [] A__ = [] for rt in rc.restypes: A__ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) A__ = {name: i for i, name in enumerate(__a )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 1_4 ) restype_atomaa_to_atomaa_list.append([0] * 3_7 ) restype_atomaa_mask_list.append([0.0] * 1_4 ) A__ = torch.tensor( __a , dtype=torch.intaa , device=protein["""aatype"""].device , ) A__ = torch.tensor( __a , dtype=torch.intaa , device=protein["""aatype"""].device , ) A__ = torch.tensor( __a , dtype=torch.floataa , device=protein["""aatype"""].device , ) A__ = protein["""aatype"""].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein A__ = restype_atomaa_to_atomaa[protein_aatype] A__ = restype_atomaa_mask[protein_aatype] A__ = residx_atomaa_mask A__ = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back A__ = restype_atomaa_to_atomaa[protein_aatype] A__ = residx_atomaa_to_atomaa.long() # create the corresponding mask A__ = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein["""aatype"""].device ) for restype, restype_letter in enumerate(rc.restypes ): A__ = rc.restype_atoa[restype_letter] A__ = rc.residue_atoms[restype_name] for atom_name in atom_names: A__ = rc.atom_order[atom_name] A__ = 1 A__ = restype_atomaa_mask[protein_aatype] A__ = residx_atomaa_mask return protein def __lowerCamelCase ( __a :Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]: """simple docstring""" A__ = tree_map(lambda __a : torch.tensor(__a , device=batch["""aatype"""].device ) , __a , np.ndarray ) A__ = tensor_tree_map(lambda __a : np.array(__a ) , make_atomaa_masks(__a ) ) return out
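# The core trick above is plain fancy indexing: a per-residue-type lookup table
# of shape [num_restypes, num_atom_slots] expands to per-residue rows when
# indexed with the aatype tensor. A minimal torch sketch with made-up shapes
# (3 residue types, 4 atom slots):
import torch

lookup = torch.tensor([[0, 1, 2, 0],
                       [0, 3, 0, 0],
                       [0, 1, 3, 2]])  # [restype, slot] -> atom index
aatype = torch.tensor([2, 0, 0, 1])    # one residue type per residue
per_residue = lookup[aatype]           # shape [num_res, 4]
print(per_residue)
# tensor([[0, 1, 3, 2],
#         [0, 1, 2, 0],
#         [0, 1, 2, 0],
#         [0, 3, 0, 0]])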
274
from sklearn.metrics import fa_score import datasets A : Any = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' A : List[Any] = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' A : List[Any] = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A (datasets.Metric ): '''simple docstring''' def a_ ( self : Optional[int] ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def a_ ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Any="binary" , __lowerCAmelCase : Optional[int]=None ) -> List[Any]: """simple docstring""" A__ = fa_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase ) return {"f1": float(__lowerCAmelCase ) if score.size == 1 else score}
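# F1 is the harmonic mean of precision and recall; a dependency-free check of
# the binary example from the docstring above (references [0, 1, 0, 1, 0] and
# predictions [0, 0, 1, 1, 0] give f1 = 0.5):
def f1_binary(references, predictions, pos_label=1):
    pairs = list(zip(predictions, references))
    tp = sum(p == pos_label and r == pos_label for p, r in pairs)
    fp = sum(p == pos_label and r != pos_label for p, r in pairs)
    fn = sum(p != pos_label and r == pos_label for p, r in pairs)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0


assert f1_binary([0, 1, 0, 1, 0], [0, 0, 1, 1, 0]) == 0.5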
274
1
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse('''3.8'''):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


A : Dict = ''''''

if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):

    class A (tr.AbstractTransform ):
        '''simple docstring'''

        def __init__( self : Any , __lowerCAmelCase : str = " " ) -> Any:
            """simple docstring"""
            A__ = sentence_delimiter

        def a_ ( self : Any , __lowerCAmelCase : str ) -> int:
            """simple docstring"""
            return list(__lowerCAmelCase )

        def a_ ( self : Any , __lowerCAmelCase : List[str] ) -> List[str]:
            """simple docstring"""
            A__ = []
            for sent_idx, sentence in enumerate(__lowerCAmelCase ):
                chars.extend(self.process_string(__lowerCAmelCase ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__lowerCAmelCase ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars

    A : Union[str, Any] = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] )
else:
    A : int = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ] )


A : List[str] = '''\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''

A : Optional[int] = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system, with a CER of 0 being a perfect score.
'''

A : Optional[Any] = '''
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns: (float): the character error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> cer = datasets.load_metric("cer") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A (datasets.Metric ): '''simple docstring''' def a_ ( self : Optional[int] ) -> Dict: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", """https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""", ] , ) def a_ ( self : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : List[str]=False ) -> Any: """simple docstring""" if concatenate_texts: return jiwer.compute_measures( __lowerCAmelCase , __lowerCAmelCase , truth_transform=__lowerCAmelCase , hypothesis_transform=__lowerCAmelCase , )["wer"] A__ = 0 A__ = 0 for prediction, reference in zip(__lowerCAmelCase , __lowerCAmelCase ): A__ = jiwer.compute_measures( __lowerCAmelCase , __lowerCAmelCase , truth_transform=__lowerCAmelCase , hypothesis_transform=__lowerCAmelCase , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
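# CER = (S + D + I) / N over characters; a minimal Levenshtein-based reference
# implementation (no jiwer needed) for a single sentence pair, which should
# agree with the metric above on simple inputs:
def cer_single(reference: str, hypothesis: str) -> float:
    r, h = list(reference), list(hypothesis)
    # dp[i][j] = edit distance between r[:i] and h[:j]
    dp = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        dp[i][0] = i
    for j in range(len(h) + 1):
        dp[0][j] = j
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            cost = 0 if r[i - 1] == h[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,         # deletion
                dp[i][j - 1] + 1,         # insertion
                dp[i - 1][j - 1] + cost,  # substitution
            )
    return dp[len(r)][len(h)] / len(r)


assert cer_single("abc", "abd") == 1 / 3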
274
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : int = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Any = '''xlm-roberta''' def __init__( self : Optional[Any] , __lowerCAmelCase : List[Any]=3_05_22 , __lowerCAmelCase : int=7_68 , __lowerCAmelCase : Tuple=12 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Union[str, Any]=30_72 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-12 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Tuple="absolute" , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=None , **__lowerCAmelCase : str , ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A__ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: A__ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
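# A standalone sketch of what the ONNX exporter receives from the config
# above: only the declared axes are dynamic, everything else is traced with a
# fixed size. Plain Python, no transformers import needed; the task value is a
# hypothetical choice.
from collections import OrderedDict

task = "sequence-classification"
if task == "multiple-choice":
    dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
    dynamic_axis = {0: "batch", 1: "sequence"}
inputs = OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
print(inputs)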
274
1
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel A : List[Any] = False A : Optional[int] = True A : Tuple = False if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() parser.add_argument( '''--repo_path''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') A : Dict = parser.parse_args() A : Any = { '''image_size''': '''sample_size''', '''num_res_blocks''': '''layers_per_block''', '''block_channels''': '''block_out_channels''', '''down_blocks''': '''down_block_types''', '''up_blocks''': '''up_block_types''', '''downscale_freq_shift''': '''freq_shift''', '''resnet_num_groups''': '''norm_num_groups''', '''resnet_act_fn''': '''act_fn''', '''resnet_eps''': '''norm_eps''', '''num_head_channels''': '''attention_head_dim''', } A : Dict = { '''time_steps''': '''time_proj''', '''mid''': '''mid_block''', '''downsample_blocks''': '''down_blocks''', '''upsample_blocks''': '''up_blocks''', } A : Union[str, Any] = '''''' if has_file(args.repo_path, '''config.json''') else '''unet''' with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader: A : int = reader.read() A : Any = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, '''config.json'''): A : str = UNetaDModel(**config) else: A : int = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel A : Optional[Any] = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) A : Optional[Any] = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: A : Optional[int] = config[key] del config[key] A : str = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']] A : str = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']] if do_only_weights: A : List[str] = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin''')) A : Any = {} for param_key, param_value in state_dict.items(): if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''): continue A : str = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('''.''')[0] == key: A : Optional[int] = param_value A : str = True if not has_changed: A : List[str] = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
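# The weight-renaming pass above boils down to: take the first "."-separated
# segment of each parameter name and, if it matches an old module name, swap in
# the new one. A standalone sketch of that idea with made-up keys (the actual
# loop above additionally skips `.op.` parameters):
key_map = {"time_steps": "time_proj", "mid": "mid_block"}
state_dict = {"time_steps.weight": 1, "mid.attn.bias": 2, "conv.weight": 3}

renamed = {}
for key, value in state_dict.items():
    head, _, rest = key.partition(".")
    new_head = key_map.get(head, head)
    renamed[new_head + "." + rest if rest else new_head] = value

print(renamed)  # {'time_proj.weight': 1, 'mid_block.attn.bias': 2, 'conv.weight': 3}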
274
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean A : str = 0 A : Any = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] A : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right A : Union[str, Any] = tuple[int, int] class A : '''simple docstring''' def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Node | None , ) -> None: """simple docstring""" A__ = pos_x A__ = pos_y A__ = (pos_y, pos_x) A__ = goal_x A__ = goal_y A__ = g_cost A__ = parent A__ = self.calculate_heuristic() A__ = self.g_cost + self.h_cost def a_ ( self : Dict ) -> float: """simple docstring""" A__ = self.pos_x - self.goal_x A__ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(__lowerCAmelCase ) + abs(__lowerCAmelCase ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : int , __lowerCAmelCase : Node ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class A : '''simple docstring''' def __init__( self : Union[str, Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> Tuple: """simple docstring""" A__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCAmelCase ) A__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __lowerCAmelCase ) A__ = [self.start] A__ = [] A__ = False def a_ ( self : List[str] ) -> list[TPosition]: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A__ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(__lowerCAmelCase ) self.closed_nodes.append(__lowerCAmelCase ) A__ = self.get_successors(__lowerCAmelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__lowerCAmelCase ) else: # retrieve the best current path A__ = self.open_nodes.pop(self.open_nodes.index(__lowerCAmelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__lowerCAmelCase ) else: self.open_nodes.append(__lowerCAmelCase ) return [self.start.pos] def a_ ( self : Optional[Any] , __lowerCAmelCase : Node ) -> list[Node]: """simple docstring""" A__ = [] for action in delta: A__ = parent.pos_x + action[1] A__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCAmelCase , ) ) return successors def a_ ( self : List[Any] , __lowerCAmelCase : Node | None ) -> list[TPosition]: """simple docstring""" A__ = node A__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A__ = current_node.parent path.reverse() return path class A : '''simple docstring''' def __init__( self : Optional[Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> None: """simple docstring""" A__ = AStar(__lowerCAmelCase , __lowerCAmelCase ) A__ = AStar(__lowerCAmelCase , __lowerCAmelCase ) A__ = False def a_ ( self : int ) -> list[TPosition]: """simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() 
self.bwd_astar.open_nodes.sort() A__ = self.fwd_astar.open_nodes.pop(0 ) A__ = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( __lowerCAmelCase , __lowerCAmelCase ) self.fwd_astar.closed_nodes.append(__lowerCAmelCase ) self.bwd_astar.closed_nodes.append(__lowerCAmelCase ) A__ = current_bwd_node A__ = current_fwd_node A__ = { self.fwd_astar: self.fwd_astar.get_successors(__lowerCAmelCase ), self.bwd_astar: self.bwd_astar.get_successors(__lowerCAmelCase ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(__lowerCAmelCase ) else: # retrieve the best current path A__ = astar.open_nodes.pop( astar.open_nodes.index(__lowerCAmelCase ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(__lowerCAmelCase ) else: astar.open_nodes.append(__lowerCAmelCase ) return [self.fwd_astar.start.pos] def a_ ( self : List[str] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ) -> list[TPosition]: """simple docstring""" A__ = self.fwd_astar.retrace_path(__lowerCAmelCase ) A__ = self.bwd_astar.retrace_path(__lowerCAmelCase ) bwd_path.pop() bwd_path.reverse() A__ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] A : Optional[int] = (0, 0) A : int = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) A : Dict = time.time() A : Optional[Any] = AStar(init, goal) A : Optional[int] = a_star.search() A : Optional[int] = time.time() - start_time print(F'''AStar execution time = {end_time:f} seconds''') A : Dict = time.time() A : Tuple = BidirectionalAStar(init, goal) A : List[Any] = time.time() - bd_start_time print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
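# The HEURISTIC flag above switches between the two classic grid heuristics.
# On a 4-connected grid with unit step cost, the true distance is at least the
# Manhattan distance, which in turn is at least the Euclidean distance, so both
# are admissible but Manhattan is the tighter (faster) bound. A quick
# standalone comparison:
from math import sqrt


def manhattan(dx: int, dy: int) -> float:
    return abs(dx) + abs(dy)


def euclidean(dx: int, dy: int) -> float:
    return sqrt(dx * dx + dy * dy)


print(manhattan(3, 4), euclidean(3, 4))  # 7 5.0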
274
1
import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline A : Union[str, Any] = { '''n_samples''': 6_4, '''horizon''': 3_2, '''num_inference_steps''': 2_0, '''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network '''scale_grad_by_std''': True, '''scale''': 0.1, '''eta''': 0.0, '''t_grad_cutoff''': 2, '''device''': '''cpu''', } if __name__ == "__main__": A : str = '''hopper-medium-v2''' A : Union[str, Any] = gym.make(env_name) A : Any = ValueGuidedRLPipeline.from_pretrained( '''bglick13/hopper-medium-v2-value-function-hor32''', env=env, ) env.seed(0) A : Optional[Any] = env.reset() A : Union[str, Any] = 0 A : Tuple = 0 A : List[Any] = 1_0_0_0 A : Dict = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy A : Union[str, Any] = pipeline(obs, planning_horizon=3_2) # execute action in environment A, A, A, A : Optional[Any] = env.step(denorm_actions) A : Any = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' F''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) A : Dict = next_observation except KeyboardInterrupt: pass print(F'''Total reward: {total_reward}''')
274
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Any ) -> Union[str, Any]: """simple docstring""" A__ = ["""a""", """b""", """c"""] # Defaults to last layer if both are None A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""c"""] ) self.assertEqual(__lowerCAmelCase , [2] ) # Out indices set to match out features A__ , A__ = get_aligned_output_features_output_indices(["""a""", """c"""] , __lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [0, 2] ) # Out features set to match out indices A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [0, 2] , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [0, 2] ) # Out features selected from negative indices A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [-3, -1] , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [-3, -1] ) def a_ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , __lowerCAmelCase ) # Out features must be a list with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] ) # Out features must be a subset of stage names with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] ) # Out indices must be a list or tuple with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(__lowerCAmelCase , 0 , ["""a""", """b"""] ) # Out indices must be a subset of stage names with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(__lowerCAmelCase , (0, 1) , ["""a"""] ) # Out features and out indices must be the same length with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] ) # Out features should match out indices with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] ) # Out features and out indices should be in order with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] ) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" A__ = BackboneMixin() A__ = ["""a""", """b""", """c"""] A__ = ["""a""", """c"""] A__ = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly A__ = ["""a""", """b"""] self.assertEqual(backbone.out_features , ["""a""", """b"""] ) self.assertEqual(backbone.out_indices , [0, 1] ) A__ = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [-3, -1] )
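# The negative out_indices accepted above follow normal Python indexing: index
# -1 is the last stage, so indices can be normalized against the stage list
# with a simple modulo. A standalone sketch of that alignment:
stage_names = ["a", "b", "c"]
out_indices = [-3, -1]
assert [stage_names[idx] for idx in out_indices] == ["a", "c"]
assert [idx % len(stage_names) for idx in out_indices] == [0, 2]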
274
1
from __future__ import annotations from collections.abc import MutableSequence class A : '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : MutableSequence[float] ) -> None: """simple docstring""" if len(__lowerCAmelCase ) != degree + 1: raise ValueError( """The number of coefficients should be equal to the degree + 1.""" ) A__ = list(__lowerCAmelCase ) A__ = degree def __add__( self : Dict , __lowerCAmelCase : Polynomial ) -> Polynomial: """simple docstring""" if self.degree > polynomial_a.degree: A__ = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , __lowerCAmelCase ) else: A__ = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , __lowerCAmelCase ) def __sub__( self : str , __lowerCAmelCase : Polynomial ) -> Polynomial: """simple docstring""" return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self : int ) -> Polynomial: """simple docstring""" return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self : Optional[Any] , __lowerCAmelCase : Polynomial ) -> Polynomial: """simple docstring""" A__ = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , __lowerCAmelCase ) def a_ ( self : Union[str, Any] , __lowerCAmelCase : int | float ) -> int | float: """simple docstring""" A__ = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self : Optional[int] ) -> str: """simple docstring""" A__ = """""" for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__lowerCAmelCase ) return polynomial def __repr__( self : Any ) -> str: """simple docstring""" return self.__str__() def a_ ( self : Any ) -> Polynomial: """simple docstring""" A__ = [0] * self.degree for i in range(self.degree ): A__ = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , __lowerCAmelCase ) def a_ ( self : Tuple , __lowerCAmelCase : int | float = 0 ) -> Polynomial: """simple docstring""" A__ = [0] * (self.degree + 2) A__ = constant for i in range(self.degree + 1 ): A__ = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , __lowerCAmelCase ) def __eq__( self : Optional[Any] , __lowerCAmelCase : object ) -> bool: """simple docstring""" if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self : Any , __lowerCAmelCase : object ) -> bool: """simple docstring""" return not self.__eq__(__lowerCAmelCase )
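# The evaluation loop above sums coefficients[i] * x**i (coefficients are
# stored lowest degree first); Horner's rule computes the same value with one
# multiply-add per coefficient. A standalone check:
def eval_naive(coefficients, x):
    return sum(c * x**i for i, c in enumerate(coefficients))


def eval_horner(coefficients, x):
    result = 0.0
    for c in reversed(coefficients):
        result = result * x + c
    return result


coeffs = [1.0, 2.0, 3.0]  # 1 + 2x + 3x^2
assert eval_naive(coeffs, 2.0) == eval_horner(coeffs, 2.0) == 17.0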
274
from collections import deque class A : '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: """simple docstring""" A__ = process_name # process name A__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A__ = arrival_time A__ = burst_time # remaining burst time A__ = 0 # total time of the process wait in ready queue A__ = 0 # time from arrival time to completion time class A : '''simple docstring''' def __init__( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int , ) -> None: """simple docstring""" A__ = number_of_queues # time slice of queues that round robin algorithm applied A__ = time_slices # unfinished process is in this ready_queue A__ = queue # current time A__ = current_time # finished process is in this sequence queue A__ = deque() def a_ ( self : Dict ) -> list[str]: """simple docstring""" A__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a_ ( self : Tuple , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a_ ( self : Optional[Any] , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a_ ( self : Dict , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a_ ( self : int , __lowerCAmelCase : deque[Process] ) -> list[int]: """simple docstring""" return [q.burst_time for q in queue] def a_ ( self : Any , __lowerCAmelCase : Process ) -> int: """simple docstring""" process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a_ ( self : Union[str, Any] , __lowerCAmelCase : deque[Process] ) -> deque[Process]: """simple docstring""" A__ = deque() # sequence deque of finished process while len(__lowerCAmelCase ) != 0: A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__lowerCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A__ = 0 # set the process's turnaround time because it is finished A__ = self.current_time - cp.arrival_time # set the completion time A__ = self.current_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a_ ( self : Optional[Any] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int ) -> tuple[deque[Process], deque[Process]]: """simple docstring""" A__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(__lowerCAmelCase ) ): A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if 
self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__lowerCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__lowerCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A__ = 0 # set the finish time A__ = self.current_time # update the process' turnaround time because it is finished A__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a_ ( self : List[Any] ) -> deque[Process]: """simple docstring""" for i in range(self.number_of_queues - 1 ): A__ , A__ = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest A : Union[str, Any] = Process('''P1''', 0, 5_3) A : Optional[Any] = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : int = Process('''P4''', 0, 2_4) A : Any = 3 A : List[Any] = [1_7, 2_5] A : Optional[Any] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) A : Optional[Any] = Process('''P1''', 0, 5_3) A : int = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : Tuple = Process('''P4''', 0, 2_4) A : Union[str, Any] = 3 A : Optional[Any] = [1_7, 2_5] A : Tuple = deque([Pa, Pa, Pa, Pa]) A : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0) A : Dict = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( F'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( F'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
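# The round-robin pass above either finishes a process within one time slice
# or charges exactly `time_slice` and re-queues it. A tiny standalone trace of
# that accounting with the burst times used in the demo (all arrivals at 0):
from collections import deque


def round_robin_once(bursts, time_slice):
    queue, current_time, finished = deque(bursts), 0, []
    for _ in range(len(queue)):  # one pass over the initial queue
        burst = queue.popleft()
        if burst > time_slice:
            current_time += time_slice
            queue.append(burst - time_slice)  # not finished: back of the queue
        else:
            current_time += burst
            finished.append(burst)  # finished inside the slice
    return current_time, list(queue), finished


print(round_robin_once([53, 17, 68, 24], time_slice=17))  # (68, [36, 51, 7], [17])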
274
1
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed A : Tuple = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(4_2) A : Union[str, Any] = '''sshleifer/student_marian_en_ro_6_1''' A : Any = '''sshleifer/tiny-mbart''' @require_torch class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' def a_ ( self : Any , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : int=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , ) -> Union[str, Any]: """simple docstring""" A__ = self.run_trainer( eval_steps=1 , max_len=12 , model_name=__lowerCAmelCase , num_train_epochs=1 , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , predict_with_generate=__lowerCAmelCase , do_train=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , ) A__ = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , """trainer_state.json""" ) ).log_history if not do_eval: return A__ = [log for log in logs if """eval_loss""" in log.keys()] A__ = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats A__ = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , __lowerCAmelCase ) assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def a_ ( self : Tuple ) -> Dict: """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def a_ ( self : List[str] ) -> Any: """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @require_torch_multi_gpu def a_ ( self : int ) -> Tuple: """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str="""--sharded_ddp simple""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def a_ ( self : List[Any] ) -> List[str]: """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str="""--sharded_ddp simple --fp16""" ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def a_ ( self : Tuple ) -> Tuple: """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__lowerCAmelCase ) @unittest.skip("""Requires an update of the env running those tests""" ) @require_torch_multi_gpu @require_fairscale def a_ ( self : int ) -> Dict: """simple docstring""" self.run_seqaseq_quick( distributed=__lowerCAmelCase , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__lowerCAmelCase 
) @require_apex @require_torch_gpu def a_ ( self : Dict ) -> str: """simple docstring""" self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str="""--fp16 --fp16_backend=apex""" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=__lowerCAmelCase , extra_args_str="""--fp16 --fp16_backend=apex""" ) @parameterized.expand(["""base""", """low""", """high""", """mixed"""] ) @require_torch_multi_gpu def a_ ( self : Tuple , __lowerCAmelCase : Dict ) -> Tuple: """simple docstring""" A__ = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } A__ = experiments[experiment_id] A__ = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} A__ = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**__lowerCAmelCase , extra_args_str=data["""extra_args_str"""] ) A__ = len(re.findall(__lowerCAmelCase , cl.err ) ) self.assertEqual(__lowerCAmelCase , data["""n_matches"""] ) @slow def a_ ( self : List[str] ) -> int: """simple docstring""" A__ = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=__lowerCAmelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=__lowerCAmelCase , ) # Check metrics A__ = TrainerState.load_from_json(os.path.join(__lowerCAmelCase , """trainer_state.json""" ) ).log_history A__ = [log for log in logs if """eval_loss""" in log.keys()] A__ = eval_metrics[0] A__ = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , __lowerCAmelCase ) # test if do_predict saves generations and metrics A__ = os.listdir(__lowerCAmelCase ) A__ = {os.path.basename(__lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def a_ ( self : List[Any] ) -> Any: """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(__lowerCAmelCase : str ) -> Tuple[int, float]: A__ = """--skip_memory_metrics 0""" A__ = self.run_trainer( max_len=1_28 , model_name=__lowerCAmelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=__lowerCAmelCase , distributed=__lowerCAmelCase , extra_args_str=__lowerCAmelCase , do_eval=__lowerCAmelCase , do_predict=__lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics A__ = TrainerState.load_from_json(Path(__lowerCAmelCase , """trainer_state.json""" ) ).log_history A__ = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 ) A__ = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 ) A__ = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss A__ , A__ , A__ = 
train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        A__ , A__ , A__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )

        A__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        A__ = gpu_peak_mem_orig + gpu_alloc_mem_orig
        A__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        A__ = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        A__ = 1_20

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            __lowerCAmelCase , __lowerCAmelCase , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
            f' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
            f' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' , )

        self.assertGreater(
            __lowerCAmelCase , __lowerCAmelCase , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
            f' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
            f' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' , )

        self.assertEqual(
            __lowerCAmelCase , __lowerCAmelCase , f'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )

    def a_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : float = 3e-3 , __lowerCAmelCase : str = "adafactor" , __lowerCAmelCase : bool = False , __lowerCAmelCase : str = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : bool = True , __lowerCAmelCase : int = None , ) -> Dict:
        """simple docstring"""
        A__ = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
        A__ = self.get_auto_remove_tmp_dir()
        A__ = f'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(__lowerCAmelCase )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(__lowerCAmelCase )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
        A__ = f'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n
--evaluation_strategy steps\n --eval_steps {str(__lowerCAmelCase )}\n '.split() A__ = """ --do_predict """.split() A__ = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += f'--optim {optim}'.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: A__ = get_gpu_count() A__ = get_torch_dist_unique_port() A__ = f'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split() A__ = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__lowerCAmelCase , env=self.get_env() ) else: A__ = ["""run_translation.py"""] + args with patch.object(__lowerCAmelCase , """argv""" , __lowerCAmelCase ): main() return output_dir
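# Launching the same training script under torch.distributed.run, as the test
# above does, just means prepending launcher arguments to the python command.
# A standalone sketch of how that command list is assembled (the script path,
# port and trailing args are made up):
import sys

n_gpus, master_port = 2, 29500
script = "examples/pytorch/translation/run_translation.py"
distributed_args = (
    f"-m torch.distributed.run --nproc_per_node={n_gpus} --master_port={master_port} {script}".split()
)
cmd = [sys.executable] + distributed_args + ["--do_train", "--output_dir", "/tmp/out"]
print(" ".join(cmd))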
274
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType A : str = logging.get_logger(__name__) A : Union[str, Any] = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Optional[Any] = '''layoutlmv3''' def __init__( self : Tuple , __lowerCAmelCase : Optional[int]=5_02_65 , __lowerCAmelCase : Tuple=7_68 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : List[Any]=30_72 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=5_12 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Tuple=10_24 , __lowerCAmelCase : List[str]=1_28 , __lowerCAmelCase : Optional[int]=1_28 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Any=1_28 , __lowerCAmelCase : str=64 , __lowerCAmelCase : Optional[int]=2_56 , __lowerCAmelCase : int=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=2_24 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : Optional[Any] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = max_ad_position_embeddings A__ = coordinate_size A__ = shape_size A__ = has_relative_attention_bias A__ = rel_pos_bins A__ = max_rel_pos A__ = has_spatial_attention_bias A__ = rel_ad_pos_bins A__ = max_rel_ad_pos A__ = text_embed A__ = visual_embed A__ = input_size A__ = num_channels A__ = patch_size A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = version.parse('''1.12''' ) @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def a_ ( self : Optional[int] ) -> 
float: """simple docstring""" return 1e-5 @property def a_ ( self : Tuple ) -> int: """simple docstring""" return 12 def a_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A__ = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence A__ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes A__ = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
274
1
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets

_CITATION = """\
@article{hendrycksmath2021,
    title={Measuring Mathematical Problem Solving With the MATH Dataset},
    author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
    journal={arXiv preprint arXiv:2103.03874},
    year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTeX.
    references: list of references, one for each prediction. Each
        reference is a string that contains natural language and LaTeX.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH benchmark, with LaTeX canonicalization."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {"accuracy": accuracy}
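# Usage sketch, following the docstring example above. Assumes `datasets` and
# the `math_equivalence` dependency (git+https://github.com/hendrycks/math.git)
# are installed.
if __name__ == "__main__":
    metric = datasets.load_metric("competition_math")
    results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    print(results)  # {'accuracy': 1.0}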
274
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : str = '''▁''' A : Any = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } A : List[Any] = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } A : Tuple = { '''facebook/m2m100_418M''': 1_0_2_4, } # fmt: off A : Optional[int] = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = ['''input_ids''', '''attention_mask'''] __lowerCamelCase : List[int] = [] __lowerCamelCase : List[int] = [] def __init__( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]="<s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : Any="<unk>" , __lowerCAmelCase : Any="m2m100" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , __lowerCAmelCase : Dict=8 , **__lowerCAmelCase : Tuple , ) -> None: """simple docstring""" A__ = {} if sp_model_kwargs is None else sp_model_kwargs A__ = language_codes A__ = FAIRSEQ_LANGUAGE_CODES[language_codes] A__ = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code} A__ 
= kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__lowerCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = vocab_file A__ = load_json(__lowerCAmelCase ) A__ = {v: k for k, v in self.encoder.items()} A__ = spm_file A__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs ) A__ = len(self.encoder ) A__ = { self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase ) } A__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )} A__ = {v: k for k, v in self.lang_token_to_id.items()} A__ = src_lang if src_lang is not None else """en""" A__ = tgt_lang A__ = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) A__ = num_madeup_words @property def a_ ( self : Optional[int] ) -> int: """simple docstring""" return len(self.encoder ) + len(self.lang_token_to_id ) @property def a_ ( self : Optional[Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def a_ ( self : List[Any] , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a_ ( self : Optional[int] , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def a_ ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] ) def a_ ( self : Optional[int] , __lowerCAmelCase : int ) -> str: """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__lowerCAmelCase , self.unk_token ) def a_ ( self : Optional[int] , __lowerCAmelCase : Dict ) -> str: """simple docstring""" A__ = [] A__ = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token A__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def a_ ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) A__ = [1] * len(self.prefix_tokens ) A__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones def a_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + 
self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self : int ) -> Dict: """simple docstring""" A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : str , __lowerCAmelCase : Dict ) -> None: """simple docstring""" A__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A__ = {} A__ = load_spm(self.spm_file , self.sp_model_kwargs ) def a_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" A__ = Path(__lowerCAmelCase ) if not save_dir.is_dir(): raise OSError(f'{save_directory} should be a directory' ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __lowerCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCAmelCase , """wb""" ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (str(__lowerCAmelCase ), str(__lowerCAmelCase )) def a_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "en" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "ro" , **__lowerCAmelCase : List[Any] , ) -> BatchEncoding: """simple docstring""" A__ = src_lang A__ = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : Tuple ) -> Tuple: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) A__ = src_lang A__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase ) A__ = self.get_lang_id(__lowerCAmelCase ) A__ = tgt_lang_id return inputs def a_ ( self : Dict ) -> int: """simple docstring""" self.set_src_lang_special_tokens(self.src_lang ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang ) def a_ ( self : str , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Tuple , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> str: """simple docstring""" return self.lang_code_to_token[lang] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> int: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) return self.lang_token_to_id[lang_token] def __lowerCamelCase ( __a :str , 
__a :Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" A__ = sentencepiece.SentencePieceProcessor(**__a ) spm.Load(str(__a ) ) return spm def __lowerCamelCase ( __a :str ) -> Union[Dict, List]: """simple docstring""" with open(__a , """r""" ) as f: return json.load(__a ) def __lowerCamelCase ( __a :List[Any] , __a :str ) -> None: """simple docstring""" with open(__a , """w""" ) as f: json.dump(__a , __a , indent=2 )
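# Usage sketch (run from a user script): translating with this tokenizer via the
# standard transformers API. The model class and checkpoint name are the usual
# public ones and are assumed available rather than defined in this file.
if __name__ == "__main__":
    from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

    encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
    # Force the decoder to start with the target-language token (see get_lang_id above).
    generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("ro"))
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))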
274
1
from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run A : List[str] = True except (ImportError, AttributeError): A : int = object def __lowerCamelCase ( *__a :List[str] , **__a :Tuple ) -> str: """simple docstring""" pass A : Tuple = False A : Any = logging.get_logger('''transformers-cli/serving''') def __lowerCamelCase ( __a :Namespace ) -> str: """simple docstring""" A__ = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(__a , args.host , args.port , args.workers ) class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : dict class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] __lowerCamelCase : Optional[List[int]] class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : str class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Any class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' @staticmethod def a_ ( __lowerCAmelCase : ArgumentParser ) -> List[Any]: """simple docstring""" A__ = parser.add_parser( """serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" ) serve_parser.add_argument( """--task""" , type=__lowerCAmelCase , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , ) serve_parser.add_argument("""--host""" , type=__lowerCAmelCase , default="""localhost""" , help="""Interface the server will listen on.""" ) serve_parser.add_argument("""--port""" , type=__lowerCAmelCase , default=88_88 , help="""Port the serving will listen to.""" ) serve_parser.add_argument("""--workers""" , type=__lowerCAmelCase , default=1 , help="""Number of http workers""" ) serve_parser.add_argument("""--model""" , type=__lowerCAmelCase , help="""Model's name or path to stored model.""" ) serve_parser.add_argument("""--config""" , type=__lowerCAmelCase , help="""Model's config name or path to stored model.""" ) serve_parser.add_argument("""--tokenizer""" , type=__lowerCAmelCase , help="""Tokenizer name to use.""" ) serve_parser.add_argument( """--device""" , type=__lowerCAmelCase , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , ) serve_parser.set_defaults(func=__lowerCAmelCase ) def __init__( self : int , __lowerCAmelCase : Pipeline , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> Optional[Any]: """simple docstring""" A__ = pipeline A__ = host A__ = port A__ = workers if not _serve_dependencies_installed: raise RuntimeError( """Using serve command requires FastAPI and uvicorn. 
""" """Please install transformers with [serving]: pip install \"transformers[serving]\".""" """Or install FastAPI and uvicorn separately.""" ) else: logger.info(f'Serving model over {host}:{port}' ) A__ = FastAPI( routes=[ APIRoute( """/""" , self.model_info , response_model=__lowerCAmelCase , response_class=__lowerCAmelCase , methods=["""GET"""] , ), APIRoute( """/tokenize""" , self.tokenize , response_model=__lowerCAmelCase , response_class=__lowerCAmelCase , methods=["""POST"""] , ), APIRoute( """/detokenize""" , self.detokenize , response_model=__lowerCAmelCase , response_class=__lowerCAmelCase , methods=["""POST"""] , ), APIRoute( """/forward""" , self.forward , response_model=__lowerCAmelCase , response_class=__lowerCAmelCase , methods=["""POST"""] , ), ] , timeout=6_00 , ) def a_ ( self : List[Any] ) -> Dict: """simple docstring""" run(self._app , host=self.host , port=self.port , workers=self.workers ) def a_ ( self : Dict ) -> Optional[Any]: """simple docstring""" return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def a_ ( self : int , __lowerCAmelCase : str = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) , __lowerCAmelCase : bool = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) ) -> int: """simple docstring""" try: A__ = self._pipeline.tokenizer.tokenize(__lowerCAmelCase ) if return_ids: A__ = self._pipeline.tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) return ServeTokenizeResult(tokens=__lowerCAmelCase , tokens_ids=__lowerCAmelCase ) else: return ServeTokenizeResult(tokens=__lowerCAmelCase ) except Exception as e: raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(__lowerCAmelCase )} ) def a_ ( self : Optional[Any] , __lowerCAmelCase : List[int] = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) , __lowerCAmelCase : bool = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) , __lowerCAmelCase : bool = Body(__lowerCAmelCase , embed=__lowerCAmelCase ) , ) -> Optional[int]: """simple docstring""" try: A__ = self._pipeline.tokenizer.decode(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return ServeDeTokenizeResult(model="""""" , text=__lowerCAmelCase ) except Exception as e: raise HTTPException(status_code=5_00 , detail={"""model""": """""", """error""": str(__lowerCAmelCase )} ) async def a_ ( self : Dict , __lowerCAmelCase : str=Body(__lowerCAmelCase , embed=__lowerCAmelCase ) ) -> int: """simple docstring""" if len(__lowerCAmelCase ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model A__ = self._pipeline(__lowerCAmelCase ) return ServeForwardResult(output=__lowerCAmelCase ) except Exception as e: raise HTTPException(5_00 , {"""error""": str(__lowerCAmelCase )} )
274
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Apply one step of Conway's Game of Life to the given grid."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Generate a list of images of subsequent Game of Life generations."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
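# Sanity check: the update rule above makes a blinker oscillate with period 2,
# so two applications of new_generation return the original grid.
if __name__ == "__main__":
    step_one = new_generation(BLINKER)
    assert step_one == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
    assert new_generation(step_one) == BLINKER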
274
1
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = AutoencoderKL __lowerCamelCase : Optional[Any] = '''sample''' __lowerCamelCase : Optional[Any] = 1E-2 @property def a_ ( self : str ) -> int: """simple docstring""" A__ = 4 A__ = 3 A__ = (32, 32) A__ = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase ) return {"sample": image} @property def a_ ( self : Dict ) -> Optional[Any]: """simple docstring""" return (3, 32, 32) @property def a_ ( self : List[Any] ) -> int: """simple docstring""" return (3, 32, 32) def a_ ( self : int ) -> Any: """simple docstring""" A__ = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } A__ = self.dummy_input return init_dict, inputs_dict def a_ ( self : List[str] ) -> str: """simple docstring""" pass def a_ ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" ) def a_ ( self : Dict ) -> int: """simple docstring""" A__ , A__ = self.prepare_init_args_and_inputs_for_common() A__ = self.model_class(**__lowerCAmelCase ) model.to(__lowerCAmelCase ) assert not model.is_gradient_checkpointing and model.training A__ = model(**__lowerCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() A__ = torch.randn_like(__lowerCAmelCase ) A__ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing A__ = self.model_class(**__lowerCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__lowerCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training A__ = model_a(**__lowerCAmelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() A__ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) A__ = dict(model.named_parameters() ) A__ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def a_ ( self : Dict ) -> List[str]: """simple docstring""" A__ , A__ = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(__lowerCAmelCase ) A__ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def a_ ( self : str ) -> Tuple: """simple docstring""" A__ = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) A__ = model.to(__lowerCAmelCase ) model.eval() if torch_device == "mps": A__ = torch.manual_seed(0 ) else: A__ = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 ) A__ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) A__ = image.to(__lowerCAmelCase ) with torch.no_grad(): A__ = model(__lowerCAmelCase , sample_posterior=__lowerCAmelCase , generator=__lowerCAmelCase ).sample A__ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": A__ = torch.tensor( [ -4.0_078e-01, -3.8_323e-04, -1.2_681e-01, -1.1_462e-01, 2.0_095e-01, 1.0_893e-01, -8.8_247e-02, -3.0_361e-01, -9.8_644e-03, ] ) elif torch_device == "cpu": A__ = torch.tensor( [-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] ) else: A__ = torch.tensor( [-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] ) self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1e-2 ) ) @slow class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ) -> List[Any]: """simple docstring""" return f'gaussian_noise_s={seed}_shape={"_".join([str(__lowerCAmelCase ) for s in shape] )}.npy' def a_ ( self : List[str] ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : int , __lowerCAmelCase : Optional[int]=0 , __lowerCAmelCase : Dict=(4, 3, 5_12, 5_12) , __lowerCAmelCase : List[Any]=False ) -> Any: """simple docstring""" A__ = torch.floataa if fpaa else torch.floataa A__ = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowerCAmelCase , __lowerCAmelCase ) ) ).to(__lowerCAmelCase ).to(__lowerCAmelCase ) return image def a_ ( self : Optional[int] , __lowerCAmelCase : Optional[int]="CompVis/stable-diffusion-v1-4" , __lowerCAmelCase : List[str]=False ) -> Optional[Any]: """simple docstring""" A__ = """fp16""" if fpaa else None A__ = torch.floataa if fpaa else torch.floataa A__ = AutoencoderKL.from_pretrained( __lowerCAmelCase , subfolder="""vae""" , torch_dtype=__lowerCAmelCase , revision=__lowerCAmelCase , ) model.to(__lowerCAmelCase ).eval() return model def a_ ( self : Optional[int] , __lowerCAmelCase : Tuple=0 ) -> Any: """simple docstring""" if torch_device == "mps": return 
torch.manual_seed(__lowerCAmelCase ) return torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def a_ ( self : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" A__ = self.get_sd_vae_model() A__ = self.get_sd_image(__lowerCAmelCase ) A__ = self.get_generator(__lowerCAmelCase ) with torch.no_grad(): A__ = model(__lowerCAmelCase , generator=__lowerCAmelCase , sample_posterior=__lowerCAmelCase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]], [47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]], # fmt: on ] ) @require_torch_gpu def a_ ( self : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" A__ = self.get_sd_vae_model(fpaa=__lowerCAmelCase ) A__ = self.get_sd_image(__lowerCAmelCase , fpaa=__lowerCAmelCase ) A__ = self.get_generator(__lowerCAmelCase ) with torch.no_grad(): A__ = model(__lowerCAmelCase , generator=__lowerCAmelCase , sample_posterior=__lowerCAmelCase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(__lowerCAmelCase ) assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def a_ ( self : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" A__ = self.get_sd_vae_model() A__ = self.get_sd_image(__lowerCAmelCase ) with torch.no_grad(): A__ = model(__lowerCAmelCase ).sample assert sample.shape == image.shape A__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() A__ = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]], [37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]], # fmt: on ] ) @require_torch_gpu def a_ ( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ) -> Union[str, Any]: """simple docstring""" A__ = self.get_sd_vae_model() A__ = self.get_sd_image(__lowerCAmelCase 
, shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(__lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] A__ = sample[-1, -2:, :2, -2:].flatten().cpu() A__ = torch.tensor(__lowerCAmelCase ) assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]], [16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]], # fmt: on ] ) @require_torch_gpu def a_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> List[str]: """simple docstring""" A__ = self.get_sd_vae_model(fpaa=__lowerCAmelCase ) A__ = self.get_sd_image(__lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=__lowerCAmelCase ) with torch.no_grad(): A__ = model.decode(__lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] A__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() A__ = torch.tensor(__lowerCAmelCase ) assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def a_ ( self : List[str] , __lowerCAmelCase : int ) -> Any: """simple docstring""" A__ = self.get_sd_vae_model(fpaa=__lowerCAmelCase ) A__ = self.get_sd_image(__lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=__lowerCAmelCase ) with torch.no_grad(): A__ = model.decode(__lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(__lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def a_ ( self : Optional[Any] , __lowerCAmelCase : str ) -> int: """simple docstring""" A__ = self.get_sd_vae_model() A__ = self.get_sd_image(__lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): A__ = model.decode(__lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): A__ = model.decode(__lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]], [47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]], # fmt: on ] ) def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: """simple docstring""" A__ = self.get_sd_vae_model() A__ = self.get_sd_image(__lowerCAmelCase ) A__ = self.get_generator(__lowerCAmelCase ) with torch.no_grad(): A__ = model.encode(__lowerCAmelCase ).latent_dist A__ = dist.sample(generator=__lowerCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] A__ = sample[0, -1, -3:, -3:].flatten().cpu() A__ = torch.tensor(__lowerCAmelCase ) A__ = 3e-3 if torch_device != """mps""" else 1e-2 assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , atol=__lowerCAmelCase )
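# Usage sketch: the encode/decode round trip the tests above exercise, using the
# same tiny dummy checkpoint. The latent shape depends on the checkpoint's
# down-block count, so it is printed rather than asserted.
if __name__ == "__main__":
    vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy").eval()
    image = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        latents = vae.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
        reconstruction = vae.decode(latents).sample
    print(latents.shape, reconstruction.shape)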
274
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")

    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
274
1
import math


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Encrypt with a columnar transposition: read the message in strides of `key`."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert encrypt_message by filling a num_rows x num_cols grid column-wise."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes):
            col = 0
            row += 1
    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
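# Round-trip check: decrypt_message inverts encrypt_message for every key in
# the documented [2, len(message) - 1] range.
if __name__ == "__main__":
    sample = "Common sense is not so common."
    for sample_key in range(2, len(sample)):
        assert decrypt_message(sample_key, encrypt_message(sample_key, sample)) == sample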
274
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __lowerCamelCase ( __a :List[str] , __a :List[Any] , __a :Union[str, Any] , __a :List[Any] ) -> Dict: """simple docstring""" A__ = multiprocessing.Manager() A__ = manager.list() A__ = multiprocessing.Process(target=__a , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __lowerCamelCase ( __a :Optional[Any] , __a :Any , __a :List[Any] ) -> Union[str, Any]: """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil A__ = shutil.rmtree A__ = os.rmdir A__ = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: A__ = {} with swallow_io(): with time_limit(__a ): exec(__a , __a ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(F'failed: {e}' ) # Needed for cleaning up. A__ = rmtree A__ = rmdir A__ = chdir @contextlib.contextmanager def __lowerCamelCase ( __a :List[str] ) -> Dict: """simple docstring""" def signal_handler(__a :List[Any] , __a :Optional[Any] ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , __a ) signal.signal(signal.SIGALRM , __a ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = WriteOnlyStringIO() with contextlib.redirect_stdout(__a ): with contextlib.redirect_stderr(__a ): with redirect_stdin(__a ): yield @contextlib.contextmanager def __lowerCamelCase ( ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(__a ): yield dirname class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A (io.StringIO ): '''simple docstring''' def a_ ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ) -> Dict: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[int] ) -> str: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Any ) -> int: """simple docstring""" raise OSError def a_ ( self : str , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> int: """simple docstring""" return False class A (contextlib._RedirectStream ): # type: ignore '''simple docstring''' __lowerCamelCase : Union[str, Any] = '''stdin''' @contextlib.contextmanager def __lowerCamelCase ( __a :Union[str, Any] ) -> List[str]: """simple docstring""" if root == ".": yield return A__ = os.getcwd() os.chdir(__a ) try: yield except BaseException as exc: raise exc finally: os.chdir(__a ) def __lowerCamelCase ( __a :Union[str, Any]=None ) -> Dict: """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , 
(maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins A__ = None A__ = None import os A__ = """1""" A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None import shutil A__ = None A__ = None A__ = None import subprocess A__ = None # type: ignore A__ = None import sys A__ = None A__ = None A__ = None A__ = None A__ = None
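# Usage sketch of how the helpers above compose (helper and exception names are
# taken from their call sites, since the definitions are renamed in this
# listing): run a snippet under the signal-based time limit with stdio
# swallowed, recording "passed" or "timed out" exactly as unsafe execution does.
if __name__ == "__main__":
    result = []
    try:
        with swallow_io():
            with time_limit(2.0):
                exec("x = sum(range(10))", {})
        result.append("passed")
    except TimeoutException:
        result.append("timed out")
    print(result)  # ['passed']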
274
1
import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def __lowerCamelCase ( __a :Optional[Any] , __a :str , __a :str , __a :Path , __a :str = None , __a :str = None , __a :str = None , ) -> Any: """simple docstring""" if config_name_or_path is None: A__ = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base""" if generator_tokenizer_name_or_path is None: A__ = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: A__ = question_encoder_name_or_path A__ = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration # Save model. A__ = RagConfig.from_pretrained(__a ) A__ = AutoConfig.from_pretrained(__a ) A__ = AutoConfig.from_pretrained(__a ) A__ = gen_config A__ = question_encoder_config A__ = model_class.from_pretrained_question_encoder_generator( __a , __a , config=__a ) rag_model.save_pretrained(__a ) # Sanity check. model_class.from_pretrained(__a ) # Save tokenizers. A__ = AutoTokenizer.from_pretrained(__a ) gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" ) A__ = AutoTokenizer.from_pretrained(__a ) question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" ) if __name__ == "__main__": A : List[str] = argparse.ArgumentParser() parser.add_argument( '''--model_type''', choices=['''rag_sequence''', '''rag_token'''], required=True, type=str, help='''RAG model type: rag_sequence, rag_token''', ) parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''') parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''') parser.add_argument( '''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier''' ) parser.add_argument( '''--generator_tokenizer_name_or_path''', type=str, help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''', ) parser.add_argument( '''--question_encoder_tokenizer_name_or_path''', type=str, help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''', ) parser.add_argument( '''--config_name_or_path''', type=str, help=( '''Identifier of the model config to use, if not provided, resolves to a base config for a given''' ''' ``model_type``''' ), ) A : int = parser.parse_args() A : List[str] = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
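# For reference, the argparse invocation above is equivalent to calling the
# `consolidate` entry point directly (the checkpoint names below are the
# standard public RAG components and are illustrative only):
#
#     consolidate(
#         "rag_token",
#         "facebook/bart-large",
#         "facebook/dpr-question_encoder-single-nq-base",
#         Path("./rag-token-consolidated"),
#     )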
274
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: A : Tuple = None A : Optional[Any] = logging.get_logger(__name__) A : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A : List[str] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } A : List[str] = { '''albert-base-v1''': 5_1_2, '''albert-large-v1''': 5_1_2, '''albert-xlarge-v1''': 5_1_2, '''albert-xxlarge-v1''': 5_1_2, '''albert-base-v2''': 5_1_2, '''albert-large-v2''': 5_1_2, '''albert-xlarge-v2''': 5_1_2, '''albert-xxlarge-v2''': 5_1_2, } A : Optional[int] = '''▁''' class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : str = VOCAB_FILES_NAMES __lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : List[str] = AlbertTokenizer def __init__( self : Tuple , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : str=False , __lowerCAmelCase : Union[str, Any]="[CLS]" , __lowerCAmelCase : int="[SEP]" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Dict="[SEP]" , __lowerCAmelCase : Union[str, Any]="<pad>" , __lowerCAmelCase : str="[CLS]" , __lowerCAmelCase : int="[MASK]" , **__lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" A__ = ( AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token ) super().__init__( __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , 
keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def a_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ): copyfile(self.vocab_file , __lowerCAmelCase ) return (out_vocab_file,)
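# Usage sketch (run from a user script): the sentence-pair layout produced by
# the two methods above is "[CLS] A [SEP] B [SEP]", with token_type_ids 0 over
# the first segment and 1 over the second. Standard transformers API, assumed
# installed.
if __name__ == "__main__":
    from transformers import AlbertTokenizerFast

    tok = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    enc = tok("first sentence", "second sentence")
    print(tok.convert_ids_to_tokens(enc["input_ids"]))
    print(enc["token_type_ids"])  # 0s for "[CLS] A [SEP]", then 1s for "B [SEP]"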
274
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
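# Usage sketch (run from a user script, not from inside this module): the
# _LazyModule indirection above keeps `import transformers` cheap, deferring the
# glpn submodule imports until first attribute access. Requires transformers
# with torch installed.
if __name__ == "__main__":
    import transformers

    model_cls = transformers.GLPNForDepthEstimation  # triggers the lazy import
    print(model_cls.__module__)  # transformers.models.glpn.modeling_glpn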
274
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A : Dict = logging.get_logger(__name__) def __lowerCamelCase ( __a :int=None , __a :Optional[Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__a ) @dataclass class A : '''simple docstring''' __lowerCamelCase : List[str] = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) __lowerCamelCase : List[int] = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) __lowerCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) __lowerCamelCase : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) __lowerCamelCase : str = field( default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) __lowerCamelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" warnings.warn( f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , __lowerCAmelCase , ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def a_ ( self : Tuple ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def a_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
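# Usage sketch: these fields are normally filled from the command line through
# HfArgumentParser via a concrete subclass such as PyTorchBenchmarkArguments;
# the exact flags below are assumptions inferred from the field names above.
if __name__ == "__main__":
    from transformers import HfArgumentParser, PyTorchBenchmarkArguments

    parser = HfArgumentParser(PyTorchBenchmarkArguments)
    (benchmark_args,) = parser.parse_args_into_dataclasses(
        ["--models", "bert-base-cased", "--batch_sizes", "8", "--sequence_lengths", "32", "128"]
    )
    print(benchmark_args.models, benchmark_args.batch_sizes, benchmark_args.sequence_lengths)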
274
1
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class A : '''simple docstring''' def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=3 , __lowerCAmelCase : Optional[int]=32 , __lowerCAmelCase : str=3 , __lowerCAmelCase : Tuple=10 , __lowerCAmelCase : Tuple=[8, 16, 32, 64] , __lowerCAmelCase : int=[1, 1, 2, 1] , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Tuple="relu" , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Any=["stage2", "stage3", "stage4"] , __lowerCAmelCase : Dict=[2, 3, 4] , __lowerCAmelCase : Optional[Any]=1 , ) -> Any: """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = embeddings_size A__ = hidden_sizes A__ = depths A__ = is_training A__ = use_labels A__ = hidden_act A__ = num_labels A__ = scope A__ = len(__lowerCAmelCase ) A__ = out_features A__ = out_indices A__ = num_groups def a_ ( self : str ) -> Dict: """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def a_ ( self : Dict ) -> Tuple: """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def a_ ( self : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" A__ = BitModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = model(__lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a_ ( self : List[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ) -> Any: """simple docstring""" A__ = self.num_labels A__ = BitForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a_ ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ) -> Dict: """simple docstring""" A__ = BitBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) 
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A__ = None A__ = BitBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() A__ = model(__lowerCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a_ ( self : Optional[int] ) -> Tuple: """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __lowerCamelCase : Tuple = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) __lowerCamelCase : int = False __lowerCamelCase : Optional[Any] = False __lowerCamelCase : Optional[int] = False __lowerCamelCase : List[str] = False __lowerCamelCase : List[str] = False def a_ ( self : Any ) -> Any: """simple docstring""" A__ = BitModelTester(self ) A__ = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def a_ ( self : Optional[int] ) -> Any: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self : str ) -> Dict: """simple docstring""" return @unittest.skip(reason="""Bit does not output attentions""" ) def a_ ( self : Dict ) -> str: """simple docstring""" pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def a_ ( self : Optional[Any] ) -> Any: """simple docstring""" pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def a_ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" pass def a_ ( self : str ) -> Optional[int]: """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(__lowerCAmelCase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCAmelCase ) def a_ ( self : str ) -> List[str]: """simple docstring""" 
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(config=__lowerCAmelCase ) for name, module in model.named_modules(): if isinstance(__lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def a_ ( self : Tuple ) -> List[str]: """simple docstring""" def check_hidden_states_output(__lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ): A__ = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = self.model_tester.num_stages self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: A__ = layer_type A__ = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def a_ ( self : Tuple ) -> str: """simple docstring""" pass def a_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) @slow def a_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = BitModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def __lowerCamelCase ( ) -> List[str]: """simple docstring""" A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A (unittest.TestCase ): '''simple docstring''' @cached_property def a_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def a_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" A__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCAmelCase ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): A__ = model(**__lowerCAmelCase ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) A__ = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) ) @require_torch class A (SCREAMING_SNAKE_CASE , unittest.TestCase ): 
'''simple docstring''' __lowerCamelCase : str = (BitBackbone,) if is_torch_available() else () __lowerCamelCase : str = BitConfig __lowerCamelCase : List[Any] = False def a_ ( self : List[Any] ) -> List[Any]: """simple docstring""" A__ = BitModelTester(self )
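To make the backbone assertions above concrete, here is an illustrative (toy, not Bit-specific) sketch of how `out_features` selects per-stage feature maps and determines `channels`; the stage names and tensor shapes are made up for the example:

import torch

stage_outputs = {
    "stage1": torch.randn(1, 8, 8, 8),
    "stage2": torch.randn(1, 16, 4, 4),
    "stage3": torch.randn(1, 32, 2, 2),
    "stage4": torch.randn(1, 64, 1, 1),
}
out_features = ["stage2", "stage3", "stage4"]
feature_maps = tuple(stage_outputs[name] for name in out_features)
channels = [fm.shape[1] for fm in feature_maps]  # one channel count per returned stage
assert len(feature_maps) == len(out_features) and channels == [16, 32, 64]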
274
from math import ceil


def __lowerCamelCase ( __a :int = 1_0_0_1 ) -> int:
    """simple docstring"""
    A__ = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        A__ = 2 * i + 1
        A__ = 2 * i
        A__ = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            A : List[str] = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('''Invalid entry - please enter a number''')
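A quick sanity check of the corner formula used above: ring `i` of the spiral has side length `2i + 1`, and its four corners sum to `4*(2i+1)**2 - 6*(2i)`. For a 5x5 spiral the diagonals are 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, the reference value from the problem statement; the helper name below is illustrative:

def spiral_diagonal_sum(side: int) -> int:
    total = 1  # the centre cell
    for i in range(1, side // 2 + 1):
        total += 4 * (2 * i + 1) ** 2 - 6 * (2 * i)
    return total


assert spiral_diagonal_sum(5) == 101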
274
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def __lowerCamelCase ( __a :int ) -> Tuple: """simple docstring""" A__ = SwinvaConfig() A__ = swinva_name.split("""_""" ) A__ = name_split[1] if "to" in name_split[3]: A__ = int(name_split[3][-3:] ) else: A__ = int(name_split[3] ) if "to" in name_split[2]: A__ = int(name_split[2][-2:] ) else: A__ = int(name_split[2][6:] ) if model_size == "tiny": A__ = 9_6 A__ = (2, 2, 6, 2) A__ = (3, 6, 1_2, 2_4) elif model_size == "small": A__ = 9_6 A__ = (2, 2, 1_8, 2) A__ = (3, 6, 1_2, 2_4) elif model_size == "base": A__ = 1_2_8 A__ = (2, 2, 1_8, 2) A__ = (4, 8, 1_6, 3_2) else: A__ = 1_9_2 A__ = (2, 2, 1_8, 2) A__ = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: A__ = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): A__ = 2_1_8_4_1 A__ = """huggingface/label-files""" A__ = """imagenet-22k-id2label.json""" A__ = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""" ) , """r""" ) ) A__ = {int(__a ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} else: A__ = 1_0_0_0 A__ = """huggingface/label-files""" A__ = """imagenet-1k-id2label.json""" A__ = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""" ) , """r""" ) ) A__ = {int(__a ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ = img_size A__ = num_classes A__ = embed_dim A__ = depths A__ = num_heads A__ = window_size return config def __lowerCamelCase ( __a :List[str] ) -> Tuple: """simple docstring""" if "patch_embed.proj" in name: A__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: A__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: A__ = """encoder.""" + name if "attn.proj" in name: A__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: A__ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: A__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: A__ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: A__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: A__ = name.replace("""mlp.fc2""" , """output.dense""" ) if "q_bias" in name: A__ = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: A__ = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: A__ = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: A__ = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if name == "norm.weight": A__ = """layernorm.weight""" if name == "norm.bias": A__ = """layernorm.bias""" if "head" in name: A__ = name.replace("""head""" , """classifier""" ) else: A__ = """swinv2.""" + name return name def __lowerCamelCase ( __a :Tuple , __a :List[str] ) -> int: """simple docstring""" for key in orig_state_dict.copy().keys(): A__ = orig_state_dict.pop(__a ) if "mask" in key: continue elif "qkv" in key: A__ = key.split(""".""" ) A__ = int(key_split[1] ) A__ = int(key_split[3] ) A__ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A__ = val[:dim, :] A__ = val[dim : dim * 2, :] A__ = val[-dim:, :] else: A__ = val[:dim] A__ = val[ dim : dim * 2 ] A__ = 
val[-dim:] else: A__ = val return orig_state_dict def __lowerCamelCase ( __a :str , __a :Optional[Any] ) -> Dict: """simple docstring""" A__ = timm.create_model(__a , pretrained=__a ) timm_model.eval() A__ = get_swinva_config(__a ) A__ = SwinvaForImageClassification(__a ) model.eval() A__ = convert_state_dict(timm_model.state_dict() , __a ) model.load_state_dict(__a ) A__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" A__ = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) ) A__ = Image.open(requests.get(__a , stream=__a ).raw ) A__ = image_processor(images=__a , return_tensors="""pt""" ) A__ = timm_model(inputs["""pixel_values"""] ) A__ = model(**__a ).logits assert torch.allclose(__a , __a , atol=1E-3 ) print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__a ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__a ) model.push_to_hub( repo_path_or_name=Path(__a , __a ) , organization="""nandwalritik""" , commit_message="""Add model""" , ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swinv2_name''', default='''swinv2_tiny_patch4_window8_256''', type=str, help='''Name of the Swinv2 timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A : Optional[int] = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
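A minimal sketch of the fused-QKV split performed in `convert_state_dict` above: timm stores a single `(3 * dim, dim)` attention weight, and the slices `[:dim]`, `[dim : dim * 2]`, and `[-dim:]` peel off the query, key, and value projections. Shapes here are illustrative:

import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)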
274
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) A : Tuple = logging.getLogger(__name__) def __lowerCamelCase ( __a :Optional[int] , __a :List[str] ) -> Tuple: """simple docstring""" A__ = np.argmax(__a , axis=1 ) return np.sum(outputs == labels ) def __lowerCamelCase ( __a :Tuple ) -> Dict: """simple docstring""" with open(__a , encoding="""utf_8""" ) as f: A__ = csv.reader(__a ) A__ = [] next(__a ) # skip the first line for line in tqdm(__a ): output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def __lowerCamelCase ( __a :Optional[int] , __a :List[Any] , __a :Dict , __a :Optional[Any] , __a :Optional[Any] , __a :int ) -> Union[str, Any]: """simple docstring""" A__ = [] for dataset in encoded_datasets: A__ = len(__a ) A__ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) A__ = np.zeros((n_batch, 2) , dtype=np.intaa ) A__ = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa ) A__ = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(__a ): A__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] A__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] A__ = with_conta A__ = with_conta A__ = len(__a ) - 1 A__ = len(__a ) - 1 A__ = with_conta A__ = with_conta A__ = mc_label A__ = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(__a ) for t in all_inputs ) ) return tensor_datasets def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = argparse.ArgumentParser() parser.add_argument("""--model_name""" , type=__a , default="""openai-gpt""" , help="""pretrained model name""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" ) parser.add_argument( """--output_dir""" , default=__a , type=__a , required=__a , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument("""--train_dataset""" , type=__a , default="""""" ) parser.add_argument("""--eval_dataset""" , type=__a , default="""""" ) parser.add_argument("""--seed""" , type=__a , default=4_2 ) parser.add_argument("""--num_train_epochs""" , type=__a , default=3 ) parser.add_argument("""--train_batch_size""" , type=__a , default=8 ) parser.add_argument("""--eval_batch_size""" , type=__a , default=1_6 ) parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__a , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , type=__a , default=1 ) parser.add_argument( """--max_steps""" , default=-1 , type=__a , help=( """If > 0: set total number of training steps to perform. 
Override num_train_epochs.""" ) , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__a , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--learning_rate""" , type=__a , default=6.25E-5 ) parser.add_argument("""--warmup_steps""" , default=0 , type=__a , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--lr_schedule""" , type=__a , default="""warmup_linear""" ) parser.add_argument("""--weight_decay""" , type=__a , default=0.01 ) parser.add_argument("""--lm_coef""" , type=__a , default=0.9 ) parser.add_argument("""--n_valid""" , type=__a , default=3_7_4 ) parser.add_argument("""--server_ip""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" ) A__ = parser.parse_args() print(__a ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) A__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A__ = torch.cuda.device_count() logger.info("""device: {}, n_gpu {}""".format(__a , __a ) ) if not args.do_train and not args.do_eval: raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset A__ = ["""_start_""", """_delimiter_""", """_classify_"""] A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(__a ) A__ = tokenizer.convert_tokens_to_ids(__a ) A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(__a ) ) model.to(__a ) # Load and encode the datasets def tokenize_and_encode(__a :Tuple ): if isinstance(__a , __a ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__a ) ) elif isinstance(__a , __a ): return obj return [tokenize_and_encode(__a ) for o in obj] logger.info("""Encoding dataset...""" ) A__ = load_rocstories_dataset(args.train_dataset ) A__ = load_rocstories_dataset(args.eval_dataset ) A__ = (train_dataset, eval_dataset) A__ = tokenize_and_encode(__a ) # Compute the max input length for the Transformer A__ = model.config.n_positions // 2 - 2 A__ = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) A__ = min(__a , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders A__ = pre_process_datasets(__a , __a , __a , *__a ) A__ , A__ = tensor_datasets[0], tensor_datasets[1] A__ = TensorDataset(*__a ) A__ = RandomSampler(__a ) A__ = DataLoader(__a , sampler=__a , batch_size=args.train_batch_size ) A__ = TensorDataset(*__a ) A__ = SequentialSampler(__a ) A__ = DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: A__ = args.max_steps A__ = args.max_steps // (len(__a 
) // args.gradient_accumulation_steps) + 1 else: A__ = len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs A__ = list(model.named_parameters() ) A__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""] A__ = [ { """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], """weight_decay""": args.weight_decay, }, {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0}, ] A__ = AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon ) A__ = get_linear_schedule_with_warmup( __a , num_warmup_steps=args.warmup_steps , num_training_steps=__a ) if args.do_train: A__ , A__ , A__ = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ): A__ = 0 A__ = 0 A__ = tqdm(__a , desc="""Training""" ) for step, batch in enumerate(__a ): A__ = tuple(t.to(__a ) for t in batch ) A__ , A__ , A__ , A__ = batch A__ = model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a ) A__ = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() A__ = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 A__ = """Training loss: {:.2e} lr: {:.2e}""".format(__a , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer A__ = model.module if hasattr(__a , """module""" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` A__ = os.path.join(args.output_dir , __a ) A__ = os.path.join(args.output_dir , __a ) torch.save(model_to_save.state_dict() , __a ) model_to_save.config.to_json_file(__a ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) A__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(__a ) if args.do_eval: model.eval() A__ , A__ = 0, 0 A__ , A__ = 0, 0 for batch in tqdm(__a , desc="""Evaluating""" ): A__ = tuple(t.to(__a ) for t in batch ) A__ , A__ , A__ , A__ = batch with torch.no_grad(): A__ , A__ , A__ , A__ = model( __a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a ) A__ = mc_logits.detach().cpu().numpy() A__ = mc_labels.to("""cpu""" ).numpy() A__ = accuracy(__a , __a ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 A__ = eval_loss / nb_eval_steps A__ = eval_accuracy / nb_eval_examples A__ = tr_loss / nb_tr_steps if args.do_train else None A__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss} A__ = os.path.join(args.output_dir , """eval_results.txt""" ) with open(__a , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , __a , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) if __name__ == "__main__": main()
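The optimizer setup above uses a common fine-tuning idiom: biases and LayerNorm weights are excluded from weight decay via substring matching on parameter names. A condensed, self-contained sketch of the same grouping (the `Toy` module is hypothetical):

import torch
from torch import nn


class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)


model = Toy()
no_decay = ["bias", "LayerNorm.weight"]
grouped_params = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_params, lr=6.25e-5)  # decay applies only to dense.weight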
274
1
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def __lowerCamelCase ( __a :str , __a :str , __a :Union[str, Any]=0 ) -> str: """simple docstring""" if name is None: A__ = None else: A__ = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(5_0 - spaces ) + """s}""" A__ = fmt.format(__a ) # Print and recurse (if needed). if isinstance(__a , __a ): if msg is not None: print(__a ) for k in val.keys(): recursive_print(__a , val[k] , spaces + 2 ) elif isinstance(__a , torch.Tensor ): print(__a , """:""" , val.size() ) else: print(__a , """:""" , __a ) def __lowerCamelCase ( __a :Optional[int] , __a :Tuple , __a :Optional[Any] , __a :Tuple , __a :Optional[int] ) -> List[Any]: """simple docstring""" A__ = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] A__ = (num_heads, hidden_size, num_splits) + input_shape[1:] A__ = param.view(*__a ) A__ = param.transpose(0 , 2 ) A__ = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] A__ = (num_heads, num_splits, hidden_size) + input_shape[1:] A__ = param.view(*__a ) A__ = param.transpose(0 , 1 ).contiguous() A__ = param.view(*__a ) return param def __lowerCamelCase ( __a :Tuple , __a :str , __a :Tuple ) -> Union[str, Any]: """simple docstring""" A__ = {} # old versions did not store training args A__ = input_state_dict.get("""args""" , __a ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) A__ = ds_args.padded_vocab_size A__ = ds_args.max_position_embeddings A__ = ds_args.hidden_size A__ = ds_args.num_layers A__ = ds_args.num_attention_heads A__ = ds_args.ffn_hidden_size # pprint(config) # The number of heads. A__ = config.n_head # The hidden_size per head. A__ = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): A__ = input_state_dict["""checkpoint_version"""] else: A__ = 0.0 # The model. 
A__ = input_state_dict["""model"""] # The language model. A__ = model["""language_model"""] # The embeddings. A__ = lm["""embedding"""] # The word embeddings. A__ = embeddings["""word_embeddings"""]["""weight"""] # Truncate the embedding table to vocab_size rows. A__ = word_embeddings[: config.vocab_size, :] A__ = word_embeddings # The position embeddings. A__ = embeddings["""position_embeddings"""]["""weight"""] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] A__ = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' ) # Store the position embeddings. A__ = pos_embeddings # The transformer. A__ = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""] # The regex to extract layer names. A__ = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" ) # The simple map of names for "automated" rules. A__ = { """attention.dense""": """.attn.c_proj.""", """self_attention.dense""": """.attn.c_proj.""", """mlp.dense_h_to_4h""": """.mlp.c_fc.""", """mlp.dense_4h_to_h""": """.mlp.c_proj.""", } # Extract the layers. for key, val in transformer.items(): # Match the name. A__ = layer_re.match(__a ) # Stop if that's not a layer if m is None: break # The index of the layer. A__ = int(m.group(1 ) ) # The name of the operation. A__ = m.group(2 ) # Is it a weight or a bias? A__ = m.group(3 ) # The name of the layer. A__ = F'transformer.h.{layer_idx}' # For layernorm(s), simply store the layer norm. if op_name.endswith("""layernorm""" ): A__ = """ln_1""" if op_name.startswith("""input""" ) else """ln_2""" A__ = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. A__ = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , __a , __a ) A__ = causal_mask # Insert a "dummy" tensor for masked_bias. A__ = torch.tensor(-1E4 , dtype=torch.floataa ) A__ = masked_bias A__ = fix_query_key_value_ordering(__a , __a , 3 , __a , __a ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. A__ = out_val.transpose(0 , 1 ).contiguous() # Store. A__ = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": A__ = fix_query_key_value_ordering(__a , __a , 3 , __a , __a ) # Store. No change of shape. A__ = out_val # Transpose the weights. elif weight_or_bias == "weight": A__ = megatron_to_transformers[op_name] A__ = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": A__ = megatron_to_transformers[op_name] A__ = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. A__ = transformer["""final_layernorm.weight"""] A__ = transformer["""final_layernorm.bias"""] # For LM head, transformers' wants the matrix to weight embeddings. A__ = word_embeddings # It should be done! 
return output_state_dict def __lowerCamelCase ( ) -> Dict: """simple docstring""" A__ = argparse.ArgumentParser() parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" ) parser.add_argument( """path_to_checkpoint""" , type=__a , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , ) parser.add_argument( """--config_file""" , default="""""" , type=__a , help="""An optional config json file describing the pre-trained model.""" , ) A__ = parser.parse_args() # Extract the basename. A__ = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' ) if args.path_to_checkpoint.endswith(""".zip""" ): with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint: with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict: A__ = torch.load(__a , map_location="""cpu""" ) else: A__ = torch.load(args.path_to_checkpoint , map_location="""cpu""" ) A__ = input_state_dict.get("""args""" , __a ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: A__ = """gelu_fast""" elif ds_args.openai_gelu: A__ = """gelu_new""" else: A__ = """gelu""" else: # in the very early days this used to be "gelu_new" A__ = """gelu_new""" # Spell out all parameters in case the defaults change. A__ = GPTaConfig( vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=__a , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=__a , summary_activation=__a , summary_proj_to_labels=__a , summary_first_dropout=0.1 , scale_attn_weights=__a , use_cache=__a , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , ) else: A__ = GPTaConfig.from_json_file(args.config_file ) A__ = ["""GPT2LMHeadModel"""] # Convert. print("""Converting""" ) A__ = convert_megatron_checkpoint(__a , __a , __a ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(__a , __a ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: A__ = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": A__ = """gpt2""" elif tokenizer_type == "PretrainedFromHF": A__ = ds_args.tokenizer_name_or_path else: raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' ) else: A__ = """gpt2""" A__ = AutoTokenizer.from_pretrained(__a ) A__ = type(__a ).__name__ A__ = tokenizer_class # Store the config to file. print("""Saving config""" ) config.save_pretrained(__a ) # Save tokenizer based on args print(F'Adding {tokenizer_class} tokenizer files' ) tokenizer.save_pretrained(__a ) # Store the state_dict to file. A__ = os.path.join(__a , """pytorch_model.bin""" ) print(F'Saving checkpoint to "{output_checkpoint_file}"' ) torch.save(__a , __a ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
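A small numeric sketch of what `fix_query_key_value_ordering` does for `checkpoint_version >= 2.0`: Megatron stores the fused QKV matrix as `[num_heads * num_splits * hidden_size_per_head, :]`, and the conversion swaps the heads and splits axes so the layout becomes splits-major. Sizes are illustrative:

import torch

num_heads, num_splits, head_size, cols = 2, 3, 4, 5
param = torch.arange(num_heads * num_splits * head_size * cols, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_size, cols)

input_shape = param.size()
reordered = param.view(num_heads, num_splits, head_size, cols)
reordered = reordered.transpose(0, 1).contiguous()  # splits-major instead of heads-major
reordered = reordered.view(*input_shape)
assert reordered.shape == input_shape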
274
import argparse from collections import defaultdict import yaml A : str = '''docs/source/en/_toctree.yml''' def __lowerCamelCase ( __a :str ) -> List[Any]: """simple docstring""" A__ = defaultdict(__a ) A__ = [] A__ = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(__a ) A__ = new_doc_list A__ = [key for key, value in counts.items() if value > 1] A__ = [] for duplicate_key in duplicates: A__ = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(__a ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) A__ = sorted(__a , key=lambda __a : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__a ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(__a ) # Sort return overview_doc def __lowerCamelCase ( __a :Any=False ) -> List[str]: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 A__ = api_doc[scheduler_idx]["""sections"""] A__ = clean_doc_toc(__a ) A__ = False if new_scheduler_doc != scheduler_doc: A__ = True if overwrite: A__ = new_scheduler_doc if diff: if overwrite: A__ = api_doc with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def __lowerCamelCase ( __a :Optional[int]=False ) -> Dict: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 A__ = False A__ = api_doc[pipeline_idx]["""sections"""] A__ = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: A__ = pipeline_doc["""section"""] A__ = clean_doc_toc(__a ) if overwrite: A__ = new_sub_pipeline_doc new_pipeline_docs.append(__a ) # sort overall pipeline doc A__ = clean_doc_toc(__a ) if new_pipeline_docs != pipeline_docs: A__ = True if overwrite: A__ = new_pipeline_docs if diff: if overwrite: A__ = api_doc with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') A : Optional[Any] = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) 
check_pipeline_doc(args.fix_and_overwrite)
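An illustrative run of the deduplication idea in `clean_doc_toc` above on a toy doc list (this simplified sketch keeps one entry per `local` key, pins the overview first, and skips the conflicting-title check the real function performs):

toy_docs = [
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "pndm", "title": "PNDM"},
]
overview = [d for d in toy_docs if d["title"].lower() == "overview"]
deduped = {d["local"]: d for d in toy_docs if d["title"].lower() != "overview"}
cleaned = overview + sorted(deduped.values(), key=lambda d: d["title"].lower())
print([d["local"] for d in cleaned])  # ['overview', 'ddim', 'pndm']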
274
1
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class A (unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : Tuple=4 , ) -> Dict: """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_attention_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_choices def a_ ( self : Any ) -> str: """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_attention_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = config_and_inputs A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class A (SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : str = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def a_ ( self : str ) -> Optional[int]: """simple docstring""" A__ = FlaxAlbertModelTester(self ) @slow def a_ ( self : int ) -> Tuple: """simple docstring""" for 
model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained("""albert-base-v2""" ) A__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCAmelCase ) @require_flax class A (unittest.TestCase ): '''simple docstring''' @slow def a_ ( self : Dict ) -> List[Any]: """simple docstring""" A__ = FlaxAlbertModel.from_pretrained("""albert-base-v2""" ) A__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) A__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] A__ = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCAmelCase ) A__ = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
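The slow test above follows the usual integration-test idiom: run a pinned checkpoint on fixed inputs and compare a small, stable output slice against stored values with a tolerance. A generic sketch of that check (the arrays are placeholders, not real ALBERT outputs):

import numpy as np


def check_output_slice(output: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-4) -> bool:
    # comparing a 3x3 slice keeps the stored reference small while still
    # catching numerical regressions in the model
    return bool(np.allclose(output[:, 1:4, 1:4], expected_slice, atol=atol))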
274
def __lowerCamelCase ( __a :str ) -> list:
    """simple docstring"""
    A__ = [0] * len(__a )
    for i in range(1 , len(__a ) ):
        # use last results for better performance - dynamic programming
        A__ = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            A__ = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        A__ = j
    return prefix_result


def __lowerCamelCase ( __a :str ) -> int:
    """simple docstring"""
    return max(prefix_function(__a ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
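A worked example of the prefix function above on "aabaaab": `pi[i]` is the length of the longest proper prefix of `s[: i + 1]` that is also its suffix. The snippet re-implements the function under a readable name so it runs standalone:

def prefix_function(s: str) -> list:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]  # fall back to the next shorter border
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi


assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert max(prefix_function("aabaaab")) == 3  # longest prefix that is also a suffix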
274
1
import unittest

from transformers import DonutProcessor


A : Optional[int] = '''naver-clova-ix/donut-base'''


class A (unittest.TestCase ):
    '''simple docstring'''

    def a_ ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        A__ = DonutProcessor.from_pretrained(__lowerCAmelCase )

    def a_ ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        A__ = {
            """name""": """John Doe""",
            """age""": """99""",
            """city""": """Atlanta""",
            """state""": """GA""",
            """zip""": """30301""",
            """phone""": """123-4567""",
            """nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
        }
        A__ = (
            """<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
            """<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
            """<s_nicknames><s_nickname>Johnny</s_nickname>"""
            """<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
        )
        A__ = self.processor.tokenajson(__lowerCAmelCase )
        self.assertDictEqual(__lowerCAmelCase , __lowerCAmelCase )
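A simplified sketch of what the `token2json` conversion exercised above does: Donut's decoder emits an XML-like tag sequence, and each `<s_key>value</s_key>` pair becomes a dictionary entry. This toy parser handles only flat, non-repeated fields; the real method also handles nesting and `<sep/>`-separated lists:

import re


def flat_token2json(tokens: str) -> dict:
    # only matches non-nested <s_key>value</s_key> pairs
    return {m.group(1): m.group(2) for m in re.finditer(r"<s_(.+?)>(.*?)</s_\1>", tokens)}


print(flat_token2json("<s_name>John Doe</s_name><s_age>99</s_age>"))
# {'name': 'John Doe', 'age': '99'}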
274
def __lowerCamelCase ( __a :int = 1_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    A__ = limit + 1
    A__ = [0] * limit
    for first_term in range(1 , __a ):
        for n in range(__a , __a , __a ):
            A__ = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also 4d < a
    A__ = sum(1 for x in frequency[1:limit] if x == 1_0 )
    return count


if __name__ == "__main__":
    print(F'''{solution() = }''')
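The algebra behind the loop above: writing the progression as `x, y, z = a + d, a, a - d` (with `a` the first term), `x**2 - y**2 - z**2` simplifies to `a * (4*d - a)`, so `n = a * (4*d - a)` and `4*d = a + n / a`, which is the `first_term + n / first_term` expression; positivity of `z` and of the terms forces `a > d` and `a < 4*d`. A quick numeric check:

a, d = 10, 3  # arbitrary example with a > d and a < 4 * d
x, y, z = a + d, a, a - d
assert x**2 - y**2 - z**2 == a * (4 * d - a) == 20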
274
1
import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging A : List[str] = logging.get_logger(__name__) A : str = {'''vocab_file''': '''spiece.model'''} A : List[Any] = { '''vocab_file''': { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''', } } # TODO(PVP) - this should be removed in Transformers v5 A : List[Any] = { '''t5-small''': 5_1_2, '''t5-base''': 5_1_2, '''t5-large''': 5_1_2, '''t5-3b''': 5_1_2, '''t5-11b''': 5_1_2, } A : List[Any] = '''▁''' class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = VOCAB_FILES_NAMES __lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple="</s>" , __lowerCAmelCase : Union[str, Any]="<unk>" , __lowerCAmelCase : Tuple="<pad>" , __lowerCAmelCase : str=1_00 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Dict[str, Any]] = None , __lowerCAmelCase : Optional[int]=True , **__lowerCAmelCase : List[str] , ) -> None: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: A__ = [f'<extra_id_{i}>' for i in range(__lowerCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens A__ = len(set(filter(lambda __lowerCAmelCase : bool("""extra_id""" in str(__lowerCAmelCase ) ) , __lowerCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to' """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) A__ = legacy A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , extra_ids=__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = vocab_file A__ = extra_ids A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCAmelCase ) @staticmethod def a_ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ) -> Any: """simple docstring""" if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: A__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __lowerCAmelCase , ) return max_model_length @property def a_ ( self : Any ) -> Tuple: """simple docstring""" return self.sp_model.get_piece_size() + self._extra_ids def a_ ( self : int ) -> List[str]: """simple docstring""" A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__lowerCAmelCase )) + [1] return ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1] def a_ ( self : int ) -> Optional[Any]: """simple docstring""" return list( set(filter(lambda __lowerCAmelCase : bool(re.search(R"""<extra_id_\d+>""" , __lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def a_ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return [self._convert_token_to_id(__lowerCAmelCase ) for token in self.get_sentinel_tokens()] def a_ ( self : Any , __lowerCAmelCase : List[int] ) -> List[int]: """simple docstring""" if len(__lowerCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f'This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : Optional[int] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def a_ ( self : Dict , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = self._add_eos_if_not_present(__lowerCAmelCase ) if token_ids_a is None: return token_ids_a else: A__ = self._add_eos_if_not_present(__lowerCAmelCase ) return token_ids_a + token_ids_a def __getstate__( self : List[Any] ) -> Dict: """simple docstring""" A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : int , __lowerCAmelCase : str ) -> int: """simple docstring""" A__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a_ ( self : List[str] , __lowerCAmelCase : "TextInput" , **__lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" if not self.legacy: A__ = SPIECE_UNDERLINE + text.replace(__lowerCAmelCase , """ """ ) return super().tokenize(__lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : Any , __lowerCAmelCase : Dict , **__lowerCAmelCase : List[str] ) -> str: """simple docstring""" if not self.legacy: A__ = text.startswith(__lowerCAmelCase ) if is_first: A__ = text[1:] A__ = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(__lowerCAmelCase ): A__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def a_ ( self : Dict , __lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" if token.startswith("""<extra_id_""" ): A__ = re.match(R"""<extra_id_(\d+)>""" , __lowerCAmelCase ) A__ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(__lowerCAmelCase ) def a_ ( self : List[Any] , __lowerCAmelCase : Any ) -> Optional[Any]: """simple docstring""" if index < self.sp_model.get_piece_size(): A__ = self.sp_model.IdToPiece(__lowerCAmelCase ) else: A__ = f'<extra_id_{self.vocab_size - 1 - index}>' return token def a_ ( self : Any , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" A__ = [] A__ = """""" A__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowerCAmelCase ) + token A__ = True A__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) A__ = False out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def a_ ( self : str , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowerCAmelCase ) 
elif not os.path.isfile(self.vocab_file ): with open(__lowerCAmelCase , """wb""" ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (out_vocab_file,)
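How the sentinel tokens handled above map to ids: the `extra_id` tokens sit at the top of the vocabulary, so `<extra_id_0>` resolves to `vocab_size - 1`, `<extra_id_1>` to `vocab_size - 2`, and so on, mirroring the `self.vocab_size - num - 1` line in the token-to-id conversion. A standalone illustration with t5-small's usual sizes (32,000 SentencePiece pieces plus 100 extras, an assumption for the example):

vocab_size = 32_000 + 100


def sentinel_id(num: int) -> int:
    return vocab_size - num - 1


assert sentinel_id(0) == 32_099   # <extra_id_0>
assert sentinel_id(99) == 32_000  # <extra_id_99>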
274
class A (SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    pass


class A (SCREAMING_SNAKE_CASE ):
    '''simple docstring'''
    pass


class A :
    '''simple docstring'''

    def __init__( self : List[Any] ) -> str:
        """simple docstring"""
        A__ = [
            [],
            [],
            [],
        ]

    def a_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
        """simple docstring"""
        try:
            if len(self.queues[priority] ) >= 1_00:
                raise OverflowError("""Maximum queue size is 100""" )
            self.queues[priority].append(__lowerCAmelCase )
        except IndexError:
            raise ValueError("""Valid priorities are 0, 1, and 2""" )

    def a_ ( self : Optional[Any] ) -> int:
        """simple docstring"""
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError("""All queues are empty""" )

    def __str__( self : Tuple ) -> str:
        """simple docstring"""
        return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) )


class A :
    '''simple docstring'''

    def __init__( self : int ) -> str:
        """simple docstring"""
        A__ = []

    def a_ ( self : int , __lowerCAmelCase : int ) -> None:
        """simple docstring"""
        if len(self.queue ) == 1_00:
            raise OverFlowError("""Maximum queue size is 100""" )
        self.queue.append(__lowerCAmelCase )

    def a_ ( self : List[str] ) -> int:
        """simple docstring"""
        if not self.queue:
            raise UnderFlowError("""The queue is empty""" )
        else:
            A__ = min(self.queue )
            self.queue.remove(__lowerCAmelCase )
            return data

    def __str__( self : List[Any] ) -> str:
        """simple docstring"""
        return str(self.queue )


def __lowerCamelCase ( ) -> Optional[Any]:
    """simple docstring"""
    A__ = FixedPriorityQueue()
    fpq.enqueue(0 , 1_0 )
    fpq.enqueue(1 , 7_0 )
    fpq.enqueue(0 , 1_0_0 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 6_4 )
    fpq.enqueue(0 , 1_2_8 )
    print(__a )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(__a )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )


def __lowerCamelCase ( ) -> int:
    """simple docstring"""
    A__ = ElementPriorityQueue()
    epq.enqueue(1_0 )
    epq.enqueue(7_0 )
    epq.enqueue(1_0_0 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(6_4 )
    epq.enqueue(1_2_8 )
    print(__a )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(__a )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
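Both queues above trade speed for simplicity: the fixed-priority version scans three lists in order, and the element queue does an O(n) `min` scan per dequeue. For larger workloads, `heapq` gives O(log n) pushes and pops; a minimal equivalent sketch:

import heapq

heap: list = []
for priority, data in [(0, 10), (2, 1), (1, 70)]:
    heapq.heappush(heap, (priority, data))  # tuples sort by priority first

while heap:
    priority, data = heapq.heappop(heap)
    print(priority, data)  # 0 10, then 1 70, then 2 1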
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
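# A quick, hand-checkable sanity check for the helper above (illustrative,
# not part of the original file): 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150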
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = tempfile.mkdtemp() # fmt: off A__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on A__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) A__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] A__ = {"""unk_token""": """<unk>"""} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__lowerCAmelCase ) ) A__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } A__ = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Tuple , **__lowerCAmelCase : Dict ) -> str: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : Union[str, Any] , **__lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : List[str] , **__lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : str ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a_ ( self : str ) -> Any: """simple docstring""" A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a_ ( self : Optional[int] ) -> Tuple: """simple docstring""" A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = self.get_image_processor() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) 
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) A__ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def a_ ( self : List[Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = self.prepare_image_inputs() A__ = image_processor(__lowerCAmelCase , return_tensors="""np""" ) A__ = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a_ ( self : Optional[Any] ) -> Any: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = processor(text=__lowerCAmelCase ) A__ = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def a_ ( self : Tuple ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.batch_decode(__lowerCAmelCase ) A__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Optional[int] ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = 
processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
Point3d = tuple[float, float, float]
Vector3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
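# Brief usage checks (illustrative only): three points on the line x = y = z
# are collinear; three corners of a triangle are not.
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))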
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
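# For comparison, a sequential sketch of the same odd-even compare-exchange
# pattern (illustrative, not part of the original file; the parallel version
# above distributes these steps across one process per element).
def odd_even_transposition_sequential(arr: list[int]) -> list[int]:
    arr = list(arr)
    n = len(arr)
    for phase in range(n):  # n phases guarantee the list is sorted
        # even phases compare pairs (0,1),(2,3),...; odd phases (1,2),(3,4),...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]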
import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process A : Optional[int] = logging.getLogger(__name__) A : List[Any] = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) A : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class A : '''simple docstring''' __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(SCREAMING_SNAKE_CASE )} , ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) __lowerCamelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def a_ ( self : Any ) -> Tuple: """simple docstring""" if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" ) @dataclass class A : '''simple docstring''' __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input training data file (a text file).'''} ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) __lowerCamelCase : Optional[str] = field( 
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , ) __lowerCamelCase : Optional[str] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) __lowerCamelCase : Optional[int] = field( default=5 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) __lowerCamelCase : Optional[int] = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated. Default to the max input length of the model.''' ) } , ) __lowerCamelCase : Optional[int] = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) __lowerCamelCase : float = field( default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) def a_ ( self : List[str] ) -> Optional[int]: """simple docstring""" if self.train_file is not None: A__ = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: A__ = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def __lowerCamelCase ( __a :Optional[Any] , __a :Optional[int] ) -> str: """simple docstring""" with open(__a , """r""" , encoding="""utf-8""" ) as f: A__ = [json.loads(__a ) for line in f.read().splitlines() if (len(__a ) > 0 and not line.isspace())] assert len(__a ) == len(__a ) A__ = {c: dataset[c] for c in dataset.column_names} A__ = refs return Dataset.from_dict(__a ) def __lowerCamelCase ( ) -> Optional[int]: """simple docstring""" A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A__ , A__ , A__ = parser.parse_args_into_dataclasses() # Detecting last checkpoint. A__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , __a ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. A__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): A__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , ) A__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , ) else: A__ = {} if data_args.train_file is not None: A__ = data_args.train_file if data_args.validation_file is not None: A__ = data_args.validation_file A__ = data_args.train_file.split(""".""" )[-1] if extension == "txt": A__ = """text""" A__ = load_dataset(__a , data_files=__a ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
A__ = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: A__ = AutoConfig.from_pretrained(model_args.config_name , **__a ) elif model_args.model_name_or_path: A__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **__a ) else: A__ = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) A__ = { """cache_dir""": model_args.cache_dir, """use_fast""": model_args.use_fast_tokenizer, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: A__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__a ) elif model_args.model_name_or_path: A__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__a ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) if model_args.model_name_or_path: A__ = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) A__ = AutoModelForMaskedLM.from_config(__a ) model.resize_token_embeddings(len(__a ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: A__ = datasets["""train"""].column_names else: A__ = datasets["""validation"""].column_names A__ = """text""" if """text""" in column_names else column_names[0] A__ = """max_length""" if data_args.pad_to_max_length else False def tokenize_function(__a :Union[str, Any] ): # Remove empty lines A__ = [line for line in examples["""text"""] if len(__a ) > 0 and not line.isspace()] return tokenizer(examples["""text"""] , padding=__a , truncation=__a , max_length=data_args.max_seq_length ) A__ = datasets.map( __a , batched=__a , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: A__ = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: A__ = add_chinese_references( tokenized_datasets["""validation"""] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer A__ = data_args.train_ref_file or data_args.validation_ref_file if has_ref: A__ = False # Data collator # This one will take care of randomly masking the tokens. 
A__ = DataCollatorForWholeWordMask(tokenizer=__a , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A__ = Trainer( model=__a , args=__a , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=__a , data_collator=__a , ) # Training if training_args.do_train: if last_checkpoint is not None: A__ = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): A__ = model_args.model_name_or_path else: A__ = None A__ = trainer.train(resume_from_checkpoint=__a ) trainer.save_model() # Saves the tokenizer too for easy upload A__ = os.path.join(training_args.output_dir , """train_results.txt""" ) if trainer.is_world_process_zero(): with open(__a , """w""" ) as writer: logger.info("""***** Train results *****""" ) for key, value in sorted(train_result.metrics.items() ): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # Evaluation A__ = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) A__ = trainer.evaluate() A__ = math.exp(eval_output["""eval_loss"""] ) A__ = perplexity A__ = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" ) if trainer.is_world_process_zero(): with open(__a , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in sorted(results.items() ): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) return results def __lowerCamelCase ( __a :Tuple ) -> Optional[Any]: """simple docstring""" main() if __name__ == "__main__": main()
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
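# Example of the even split this produces (illustrative, not part of the
# original file): six layers over three devices gives two consecutive layers
# per device, and the check passes because every layer appears exactly once.
device_map = get_device_map(n_layers=6, devices=[0, 1, 2])
assert device_map == {0: [0, 1], 1: [2, 3], 2: [4, 5]}
assert_device_map(device_map, num_blocks=6)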
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class A (unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : Tuple=4 , ) -> Dict: """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_attention_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_choices def a_ ( self : Any ) -> str: """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_attention_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = config_and_inputs A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class A (SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : str = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def a_ ( self : str ) -> Optional[int]: """simple docstring""" A__ = FlaxAlbertModelTester(self ) @slow def a_ ( self : int ) -> Tuple: """simple docstring""" for 
model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained("""albert-base-v2""" ) A__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCAmelCase ) @require_flax class A (unittest.TestCase ): '''simple docstring''' @slow def a_ ( self : Dict ) -> List[Any]: """simple docstring""" A__ = FlaxAlbertModel.from_pretrained("""albert-base-v2""" ) A__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) A__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] A__ = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCAmelCase ) A__ = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Dict ) -> Optional[Any]: """simple docstring""" A__ = logging.get_logger() # the current default level is logging.WARNING A__ = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(__lowerCAmelCase ) def a_ ( self : int ) -> str: """simple docstring""" A__ = logging.get_verbosity() A__ = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) A__ = """Testing 1, 2, 3""" # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(__lowerCAmelCase ) as cl: logger.warning(__lowerCAmelCase ) self.assertEqual(cl.out , msg + """\n""" ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(__lowerCAmelCase ) as cl: logger.warning(__lowerCAmelCase ) self.assertEqual(cl.out , """""" ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(__lowerCAmelCase ) as cl: logger.warning(__lowerCAmelCase ) self.assertEqual(cl.out , msg + """\n""" ) # restore to the original level logging.set_verbosity(__lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY="""error""" ) def a_ ( self : List[str] ) -> List[Any]: """simple docstring""" transformers.utils.logging._reset_library_root_logger() # this action activates the env var A__ = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) A__ = os.getenv("""TRANSFORMERS_VERBOSITY""" , __lowerCAmelCase ) A__ = logging.log_levels[env_level_str] A__ = logging.get_verbosity() self.assertEqual( __lowerCAmelCase , __lowerCAmelCase , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level A__ = """""" transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY="""super-error""" ) def a_ ( self : List[str] ) -> str: """simple docstring""" transformers.utils.logging._reset_library_root_logger() A__ = logging.logging.getLogger() with CaptureLogger(__lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger("""transformers.models.bart.tokenization_bart""" ) self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out ) # no need to restore as nothing was changed def a_ ( self : Tuple ) -> int: """simple docstring""" transformers.utils.logging._reset_library_root_logger() A__ = logging.get_logger("""transformers.models.bart.tokenization_bart""" ) A__ = """Testing 1, 2, 3""" with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ): # nothing should be logged as env var disables this method with CaptureLogger(__lowerCAmelCase ) as cl: 
logger.warning_advice(__lowerCAmelCase ) self.assertEqual(cl.out , """""" ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(__lowerCAmelCase ) as cl: logger.warning_advice(__lowerCAmelCase ) self.assertEqual(cl.out , msg + """\n""" ) def __lowerCamelCase ( ) -> List[Any]: """simple docstring""" disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
from sklearn.metrics import fa_score import datasets A : Any = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' A : List[Any] = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' A : List[Any] = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A (datasets.Metric ): '''simple docstring''' def a_ ( self : Optional[int] ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def a_ ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Any="binary" , __lowerCAmelCase : Optional[int]=None ) -> List[Any]: """simple docstring""" A__ = fa_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase ) return {"f1": float(__lowerCAmelCase ) if score.size == 1 else score}
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count the reversible numbers of the given length, filling digit pairs
    from the outside in while tracking the carry parity."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
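# Consistency check (illustrative, not part of the original file): the
# Project Euler 145 statement notes there are exactly 120 reversible numbers
# below one thousand, i.e. with at most three digits.
assert solution(max_power=3) == 120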
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
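# A short usage sketch via the public transformers API (illustrative, not part
# of the original file): any field can be overridden at construction time, and
# the rest keep the defaults listed above.
from transformers import XLMRobertaConfig as _XLMRobertaConfig

config = _XLMRobertaConfig(num_hidden_layers=6)
assert config.num_hidden_layers == 6
assert config.model_type == "xlm-roberta"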
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order so that the head ends up holding the minimum
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(
    sll_one: SortedLinkedList, sll_two: SortedLinkedList
) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
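# A quick check of the merged ordering (illustrative, not part of the original
# file): iteration starts at the head, which holds the smallest value, so the
# merged list comes out ascending.
merged = merge_lists(SortedLinkedList(test_data_odd), SortedLinkedList(test_data_even))
assert list(merged) == sorted(test_data_odd + test_data_even)
assert len(merged) == 16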
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean A : str = 0 A : Any = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] A : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right A : Union[str, Any] = tuple[int, int] class A : '''simple docstring''' def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Node | None , ) -> None: """simple docstring""" A__ = pos_x A__ = pos_y A__ = (pos_y, pos_x) A__ = goal_x A__ = goal_y A__ = g_cost A__ = parent A__ = self.calculate_heuristic() A__ = self.g_cost + self.h_cost def a_ ( self : Dict ) -> float: """simple docstring""" A__ = self.pos_x - self.goal_x A__ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(__lowerCAmelCase ) + abs(__lowerCAmelCase ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : int , __lowerCAmelCase : Node ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class A : '''simple docstring''' def __init__( self : Union[str, Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> Tuple: """simple docstring""" A__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCAmelCase ) A__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __lowerCAmelCase ) A__ = [self.start] A__ = [] A__ = False def a_ ( self : List[str] ) -> list[TPosition]: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A__ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(__lowerCAmelCase ) self.closed_nodes.append(__lowerCAmelCase ) A__ = self.get_successors(__lowerCAmelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__lowerCAmelCase ) else: # retrieve the best current path A__ = self.open_nodes.pop(self.open_nodes.index(__lowerCAmelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__lowerCAmelCase ) else: self.open_nodes.append(__lowerCAmelCase ) return [self.start.pos] def a_ ( self : Optional[Any] , __lowerCAmelCase : Node ) -> list[Node]: """simple docstring""" A__ = [] for action in delta: A__ = parent.pos_x + action[1] A__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCAmelCase , ) ) return successors def a_ ( self : List[Any] , __lowerCAmelCase : Node | None ) -> list[TPosition]: """simple docstring""" A__ = node A__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A__ = current_node.parent path.reverse() return path class A : '''simple docstring''' def __init__( self : Optional[Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> None: """simple docstring""" A__ = AStar(__lowerCAmelCase , __lowerCAmelCase ) A__ = AStar(__lowerCAmelCase , __lowerCAmelCase ) A__ = False def a_ ( self : int ) -> list[TPosition]: """simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() 
self.bwd_astar.open_nodes.sort() A__ = self.fwd_astar.open_nodes.pop(0 ) A__ = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( __lowerCAmelCase , __lowerCAmelCase ) self.fwd_astar.closed_nodes.append(__lowerCAmelCase ) self.bwd_astar.closed_nodes.append(__lowerCAmelCase ) A__ = current_bwd_node A__ = current_fwd_node A__ = { self.fwd_astar: self.fwd_astar.get_successors(__lowerCAmelCase ), self.bwd_astar: self.bwd_astar.get_successors(__lowerCAmelCase ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(__lowerCAmelCase ) else: # retrieve the best current path A__ = astar.open_nodes.pop( astar.open_nodes.index(__lowerCAmelCase ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(__lowerCAmelCase ) else: astar.open_nodes.append(__lowerCAmelCase ) return [self.fwd_astar.start.pos] def a_ ( self : List[str] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ) -> list[TPosition]: """simple docstring""" A__ = self.fwd_astar.retrace_path(__lowerCAmelCase ) A__ = self.bwd_astar.retrace_path(__lowerCAmelCase ) bwd_path.pop() bwd_path.reverse() A__ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] A : Optional[int] = (0, 0) A : int = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) A : Dict = time.time() A : Optional[Any] = AStar(init, goal) A : Optional[int] = a_star.search() A : Optional[int] = time.time() - start_time print(F'''AStar execution time = {end_time:f} seconds''') A : Dict = time.time() A : Tuple = BidirectionalAStar(init, goal) A : List[Any] = time.time() - bd_start_time print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
def is_pentagonal(n: int) -> bool:
    """Returns True if n is a pentagonal number, False otherwise."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Returns the smallest difference of two pentagonal numbers P(j) and P(k)
    whose sum and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b

    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
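# Hand-checkable cases for the helper (illustrative, not part of the original
# file): the pentagonal sequence starts 1, 5, 12, 22, 35, ..., so 22 is
# pentagonal and 23 is not.
assert is_pentagonal(22)  # P(4) = 4 * (3 * 4 - 1) / 2 = 22
assert not is_pentagonal(23)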
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Any ) -> Union[str, Any]: """simple docstring""" A__ = ["""a""", """b""", """c"""] # Defaults to last layer if both are None A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""c"""] ) self.assertEqual(__lowerCAmelCase , [2] ) # Out indices set to match out features A__ , A__ = get_aligned_output_features_output_indices(["""a""", """c"""] , __lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [0, 2] ) # Out features set to match out indices A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [0, 2] , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [0, 2] ) # Out features selected from negative indices A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [-3, -1] , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [-3, -1] ) def a_ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , __lowerCAmelCase ) # Out features must be a list with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] ) # Out features must be a subset of stage names with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] ) # Out indices must be a list or tuple with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(__lowerCAmelCase , 0 , ["""a""", """b"""] ) # Out indices must be a subset of stage names with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(__lowerCAmelCase , (0, 1) , ["""a"""] ) # Out features and out indices must be the same length with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] ) # Out features should match out indices with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] ) # Out features and out indices should be in order with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] ) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" A__ = BackboneMixin() A__ = ["""a""", """b""", """c"""] A__ = ["""a""", """c"""] A__ = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly A__ = ["""a""", """b"""] self.assertEqual(backbone.out_features , ["""a""", """b"""] ) self.assertEqual(backbone.out_indices , [0, 1] ) A__ = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [-3, -1] )
274
1
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class A : '''simple docstring''' def __init__( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any=13 , __lowerCAmelCase : str=7 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=99 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Union[str, Any]=4 , __lowerCAmelCase : Optional[int]=37 , __lowerCAmelCase : Tuple="gelu" , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Tuple=5_12 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[int]=0.0_2 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Dict="None" , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Optional[int]=4 , __lowerCAmelCase : Union[str, Any]=None , ) -> Optional[int]: """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = relative_attention A__ = position_biased_input A__ = pos_att_type A__ = scope def a_ ( self : Tuple ) -> int: """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__lowerCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ ( self : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , 
__lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" A__ = TFDebertaVaModel(config=__lowerCAmelCase ) A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} A__ = [input_ids, input_mask] A__ = model(__lowerCAmelCase ) A__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] ) -> Tuple: """simple docstring""" A__ = TFDebertaVaForMaskedLM(config=__lowerCAmelCase ) A__ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } A__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a_ ( self : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Dict: """simple docstring""" A__ = self.num_labels A__ = TFDebertaVaForSequenceClassification(config=__lowerCAmelCase ) A__ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } A__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a_ ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict ) -> Union[str, Any]: """simple docstring""" A__ = self.num_labels A__ = TFDebertaVaForTokenClassification(config=__lowerCAmelCase ) A__ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } A__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a_ ( self : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ) -> int: """simple docstring""" A__ = TFDebertaVaForQuestionAnswering(config=__lowerCAmelCase ) A__ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } A__ = model(__lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a_ ( self : str ) -> Union[str, Any]: """simple docstring""" A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : List[str] = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) __lowerCamelCase : Any = ( { 
'''feature-extraction''': TFDebertaVaModel, '''fill-mask''': TFDebertaVaForMaskedLM, '''question-answering''': TFDebertaVaForQuestionAnswering, '''text-classification''': TFDebertaVaForSequenceClassification, '''token-classification''': TFDebertaVaForTokenClassification, '''zero-shot''': TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) __lowerCamelCase : Tuple = False __lowerCamelCase : List[Any] = False def a_ ( self : List[str] ) -> int: """simple docstring""" A__ = TFDebertaVaModelTester(self ) A__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 ) def a_ ( self : Any ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def a_ ( self : Tuple ) -> Dict: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> int: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase ) def a_ ( self : Tuple ) -> Optional[Any]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) def a_ ( self : List[Any] ) -> List[Any]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase ) def a_ ( self : List[str] ) -> List[str]: """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) @slow def a_ ( self : List[Any] ) -> str: """simple docstring""" A__ = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" ) self.assertIsNotNone(__lowerCAmelCase ) @require_tf class A (unittest.TestCase ): '''simple docstring''' @unittest.skip(reason="""Model not available yet""" ) def a_ ( self : Any ) -> Optional[int]: """simple docstring""" pass @slow def a_ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" A__ = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" ) A__ = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) A__ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] A__ = tf.constant( [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 )
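# A minimal forward-pass sketch with a tiny, randomly initialised model. Note
# that this file refers to the classes under renamed aliases; in stock
# transformers they are DebertaV2Config and TFDebertaV2Model. The sizes below
# are illustrative assumptions, not values taken from the tests above.
import tensorflow as tf
from transformers import DebertaV2Config, TFDebertaV2Model

tiny_config = DebertaV2Config(
    vocab_size=128, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=64,
)
tiny_model = TFDebertaV2Model(tiny_config)
out = tiny_model(tf.constant([[1, 2, 3, 4]]))
print(out.last_hidden_state.shape)  # (1, 4, 32)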
274
from collections import deque class A : '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: """simple docstring""" A__ = process_name # process name A__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A__ = arrival_time A__ = burst_time # remaining burst time A__ = 0 # total time of the process wait in ready queue A__ = 0 # time from arrival time to completion time class A : '''simple docstring''' def __init__( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int , ) -> None: """simple docstring""" A__ = number_of_queues # time slice of queues that round robin algorithm applied A__ = time_slices # unfinished process is in this ready_queue A__ = queue # current time A__ = current_time # finished process is in this sequence queue A__ = deque() def a_ ( self : Dict ) -> list[str]: """simple docstring""" A__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a_ ( self : Tuple , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a_ ( self : Optional[Any] , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a_ ( self : Dict , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a_ ( self : int , __lowerCAmelCase : deque[Process] ) -> list[int]: """simple docstring""" return [q.burst_time for q in queue] def a_ ( self : Any , __lowerCAmelCase : Process ) -> int: """simple docstring""" process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a_ ( self : Union[str, Any] , __lowerCAmelCase : deque[Process] ) -> deque[Process]: """simple docstring""" A__ = deque() # sequence deque of finished process while len(__lowerCAmelCase ) != 0: A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__lowerCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A__ = 0 # set the process's turnaround time because it is finished A__ = self.current_time - cp.arrival_time # set the completion time A__ = self.current_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a_ ( self : Optional[Any] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int ) -> tuple[deque[Process], deque[Process]]: """simple docstring""" A__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(__lowerCAmelCase ) ): A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if 
self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__lowerCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__lowerCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A__ = 0 # set the finish time A__ = self.current_time # update the process' turnaround time because it is finished A__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a_ ( self : List[Any] ) -> deque[Process]: """simple docstring""" for i in range(self.number_of_queues - 1 ): A__ , A__ = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest A : Union[str, Any] = Process('''P1''', 0, 5_3) A : Optional[Any] = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : int = Process('''P4''', 0, 2_4) A : Any = 3 A : List[Any] = [1_7, 2_5] A : Optional[Any] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) A : Optional[Any] = Process('''P1''', 0, 5_3) A : int = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : Tuple = Process('''P4''', 0, 2_4) A : Union[str, Any] = 3 A : Optional[Any] = [1_7, 2_5] A : Tuple = deque([Pa, Pa, Pa, Pa]) A : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0) A : Dict = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( F'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( F'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
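# A smaller, deterministic illustration of the scheduler above (a sketch using
# the constructor orderings the demo block relies on: Process(name, arrival,
# burst) and MLFQ(number_of_queues, time_slices, queue, current_time)):
from collections import deque

p = Process("P0", 0, 5)
mlfq = MLFQ(2, [2], deque([p]), 0)
finished = mlfq.multi_level_feedback_queue()
# One 2-tick round-robin pass interrupts P0; the final FCFS stage finishes it.
print([proc.process_name for proc in finished])  # ['P0']
print(p.turnaround_time)  # 5 -- a single process, so turnaround equals burst time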
274
1
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A : Dict = logging.get_logger(__name__) def __lowerCamelCase ( __a :int=None , __a :Optional[Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__a ) @dataclass class A : '''simple docstring''' __lowerCamelCase : List[str] = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) __lowerCamelCase : List[int] = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) __lowerCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) __lowerCamelCase : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) __lowerCamelCase : str = field( default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) __lowerCamelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" warnings.warn( f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , __lowerCAmelCase , ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def a_ ( self : Tuple ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def a_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
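# Why list_field wraps its default in a default_factory (a standalone sketch):
# a mutable dataclass default must be created per instance, otherwise every
# instance would share a single list object.
import dataclasses


@dataclasses.dataclass
class Args:
    batch_sizes: list = dataclasses.field(default_factory=lambda: [8])


a, b = Args(), Args()
a.batch_sizes.append(32)
print(a.batch_sizes, b.batch_sizes)  # [8, 32] [8] -- defaults stay independent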
274
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType A : str = logging.get_logger(__name__) A : Union[str, Any] = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Optional[Any] = '''layoutlmv3''' def __init__( self : Tuple , __lowerCAmelCase : Optional[int]=5_02_65 , __lowerCAmelCase : Tuple=7_68 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : List[Any]=30_72 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=5_12 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Tuple=10_24 , __lowerCAmelCase : List[str]=1_28 , __lowerCAmelCase : Optional[int]=1_28 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Any=1_28 , __lowerCAmelCase : str=64 , __lowerCAmelCase : Optional[int]=2_56 , __lowerCAmelCase : int=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=2_24 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : Optional[Any] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = max_ad_position_embeddings A__ = coordinate_size A__ = shape_size A__ = has_relative_attention_bias A__ = rel_pos_bins A__ = max_rel_pos A__ = has_spatial_attention_bias A__ = rel_ad_pos_bins A__ = max_rel_ad_pos A__ = text_embed A__ = visual_embed A__ = input_size A__ = num_channels A__ = patch_size A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = version.parse('''1.12''' ) @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def a_ ( self : Optional[int] ) -> 
float: """simple docstring""" return 1e-5 @property def a_ ( self : Tuple ) -> int: """simple docstring""" return 12 def a_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A__ = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence A__ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes A__ = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
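# A quick sanity check for the configuration above, under the public
# transformers name for this class (the printed defaults mirror the
# constructor signature in this file):
from transformers import LayoutLMv3Config

config = LayoutLMv3Config()
print(config.model_type)                                         # layoutlmv3
print(config.hidden_size, config.input_size, config.patch_size)  # 768 224 16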
274
1
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer A : Tuple = logging.get_logger(__name__) A : List[str] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all BART models at https://huggingface.co/models?filter=bart A : Tuple = { '''vocab_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''', }, '''merges_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''', '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''', '''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''', '''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''', '''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''', '''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''', }, } A : int = { '''facebook/bart-base''': 1_0_2_4, '''facebook/bart-large''': 1_0_2_4, '''facebook/bart-large-mnli''': 1_0_2_4, '''facebook/bart-large-cnn''': 1_0_2_4, '''facebook/bart-large-xsum''': 1_0_2_4, '''yjernite/bart_eli5''': 1_0_2_4, } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Dict = VOCAB_FILES_NAMES __lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask'''] __lowerCamelCase : List[str] = BartTokenizer def __init__( self : Tuple , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict="replace" , __lowerCAmelCase : Optional[int]="<s>" , __lowerCAmelCase : Optional[Any]="</s>" , __lowerCAmelCase : Dict="</s>" , __lowerCAmelCase : int="<s>" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : 
List[str]="<mask>" , __lowerCAmelCase : int=False , __lowerCAmelCase : Optional[Any]=True , **__lowerCAmelCase : Tuple , ) -> Any: """simple docstring""" super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __lowerCAmelCase ) != add_prefix_space: A__ = getattr(__lowerCAmelCase , pre_tok_state.pop("""type""" ) ) A__ = add_prefix_space A__ = pre_tok_class(**__lowerCAmelCase ) A__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A__ = """post_processor""" A__ = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) if tokenizer_component_instance: A__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A__ = tuple(state["""sep"""] ) if "cls" in state: A__ = tuple(state["""cls"""] ) A__ = False if state.get("""add_prefix_space""" , __lowerCAmelCase ) != add_prefix_space: A__ = add_prefix_space A__ = True if state.get("""trim_offsets""" , __lowerCAmelCase ) != trim_offsets: A__ = trim_offsets A__ = True if changes_to_apply: A__ = getattr(__lowerCAmelCase , state.pop("""type""" ) ) A__ = component_class(**__lowerCAmelCase ) setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase ) @property def a_ ( self : Optional[int] ) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def a_ ( self : str , __lowerCAmelCase : Optional[int] ) -> int: """simple docstring""" A__ = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value A__ = value def a_ ( self : Union[str, Any] , *__lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Optional[Any] ) -> BatchEncoding: """simple docstring""" A__ = kwargs.get("""is_split_into_words""" , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : List[str] , *__lowerCAmelCase : int , **__lowerCAmelCase : Dict ) -> BatchEncoding: """simple docstring""" A__ = kwargs.get("""is_split_into_words""" , __lowerCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" A__ = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase ) def a_ ( self : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any=None ) -> List[Any]: """simple 
docstring""" A__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
274
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : str = '''▁''' A : Any = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } A : List[Any] = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } A : Tuple = { '''facebook/m2m100_418M''': 1_0_2_4, } # fmt: off A : Optional[int] = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = ['''input_ids''', '''attention_mask'''] __lowerCamelCase : List[int] = [] __lowerCamelCase : List[int] = [] def __init__( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]="<s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : Any="<unk>" , __lowerCAmelCase : Any="m2m100" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , __lowerCAmelCase : Dict=8 , **__lowerCAmelCase : Tuple , ) -> None: """simple docstring""" A__ = {} if sp_model_kwargs is None else sp_model_kwargs A__ = language_codes A__ = FAIRSEQ_LANGUAGE_CODES[language_codes] A__ = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code} A__ 
= kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__lowerCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = vocab_file A__ = load_json(__lowerCAmelCase ) A__ = {v: k for k, v in self.encoder.items()} A__ = spm_file A__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs ) A__ = len(self.encoder ) A__ = { self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase ) } A__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )} A__ = {v: k for k, v in self.lang_token_to_id.items()} A__ = src_lang if src_lang is not None else """en""" A__ = tgt_lang A__ = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) A__ = num_madeup_words @property def a_ ( self : Optional[int] ) -> int: """simple docstring""" return len(self.encoder ) + len(self.lang_token_to_id ) @property def a_ ( self : Optional[Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def a_ ( self : List[Any] , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a_ ( self : Optional[int] , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def a_ ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] ) def a_ ( self : Optional[int] , __lowerCAmelCase : int ) -> str: """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__lowerCAmelCase , self.unk_token ) def a_ ( self : Optional[int] , __lowerCAmelCase : Dict ) -> str: """simple docstring""" A__ = [] A__ = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token A__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def a_ ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) A__ = [1] * len(self.prefix_tokens ) A__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones def a_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + 
self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self : int ) -> Dict: """simple docstring""" A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : str , __lowerCAmelCase : Dict ) -> None: """simple docstring""" A__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A__ = {} A__ = load_spm(self.spm_file , self.sp_model_kwargs ) def a_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" A__ = Path(__lowerCAmelCase ) if not save_dir.is_dir(): raise OSError(f'{save_directory} should be a directory' ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __lowerCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCAmelCase , """wb""" ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (str(__lowerCAmelCase ), str(__lowerCAmelCase )) def a_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "en" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "ro" , **__lowerCAmelCase : List[Any] , ) -> BatchEncoding: """simple docstring""" A__ = src_lang A__ = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : Tuple ) -> Tuple: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) A__ = src_lang A__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase ) A__ = self.get_lang_id(__lowerCAmelCase ) A__ = tgt_lang_id return inputs def a_ ( self : Dict ) -> int: """simple docstring""" self.set_src_lang_special_tokens(self.src_lang ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang ) def a_ ( self : str , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Tuple , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> str: """simple docstring""" return self.lang_code_to_token[lang] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> int: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) return self.lang_token_to_id[lang_token] def __lowerCamelCase ( __a :str , 
__a :Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" A__ = sentencepiece.SentencePieceProcessor(**__a ) spm.Load(str(__a ) ) return spm def __lowerCamelCase ( __a :str ) -> Union[Dict, List]: """simple docstring""" with open(__a , """r""" ) as f: return json.load(__a ) def __lowerCamelCase ( __a :List[Any] , __a :str ) -> None: """simple docstring""" with open(__a , """w""" ) as f: json.dump(__a , __a , indent=2 )
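# A hedged translation-preprocessing sketch (downloads a checkpoint from the
# map above; the language codes come from FAIRSEQ_LANGUAGE_CODES):
from transformers import M2M100Tokenizer

tok = M2M100Tokenizer.from_pretrained(
    "facebook/m2m100_418M", src_lang="en", tgt_lang="ro"
)
enc = tok("Hello")
# Per set_src_lang_special_tokens, the source is wrapped as __en__ ... </s>.
print(tok.convert_ids_to_tokens(enc["input_ids"]))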
274
1
red = 0    # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2   # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence containing only the values 0, 1 and 2 in a single pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
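# A deterministic check that avoids the interactive prompt above:
print(dutch_national_flag_sort([2, 0, 1, 2, 0, 1]))  # [0, 0, 1, 1, 2, 2]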
274
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
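# A quick oscillator check that needs no PIL: a vertical blinker becomes a
# horizontal one after a single generation.
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]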
274
1
def split(string: str, separator: str = " ") -> list:
    """Split a string on every occurrence of the separator character."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
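# Examples. Note the trailing-separator quirk: unlike str.split, a final
# separator does not produce an empty last field.
print(split("apple#banana#cherry#orange", separator="#"))
# ['apple', 'banana', 'cherry', 'orange']
print(split("Hello there"))  # ['Hello', 'there']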
274
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
274
1
import json import os import torch from diffusers import UNetaDModel os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True) def __lowerCamelCase ( __a :Union[str, Any] ) -> Any: """simple docstring""" if hor == 1_2_8: A__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") A__ = (3_2, 1_2_8, 2_5_6) A__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 3_2: A__ = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") A__ = (3_2, 6_4, 1_2_8, 2_5_6) A__ = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") A__ = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' ) A__ = model.state_dict() A__ = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 1_4, """out_channels""": 1_4, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5_5_3_6, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } A__ = UNetaDModel(**__a ) print(F'length of state dict: {len(state_dict.keys() )}' ) print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' ) A__ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): A__ = state_dict.pop(__a ) hf_value_function.load_state_dict(__a ) torch.save(hf_value_function.state_dict() , F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' ) with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' , """w""" ) as f: json.dump(__a , __a ) def __lowerCamelCase ( ) -> List[str]: """simple docstring""" A__ = { """in_channels""": 1_4, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (3_2, 6_4, 1_2_8, 2_5_6), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5_5_3_6, """out_channels""": 1_4, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } A__ = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) A__ = model A__ = UNetaDModel(**__a ) print(F'length of state dict: {len(state_dict.keys() )}' ) print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' ) A__ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): A__ = state_dict.pop(__a ) hf_value_function.load_state_dict(__a ) torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(__a , __a ) if __name__ == "__main__": unet(3_2) # unet(128) value_function()
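# The key step of the conversion above, as a standalone sketch: pair the source
# checkpoint's keys with the target model's keys positionally (this relies on
# both state dicts listing parameters in matching order), then move each tensor
# across under its new name. The names below are illustrative, not the script's.
import torch

src = {"blocks.0.weight": torch.zeros(2), "blocks.0.bias": torch.zeros(2)}
dst_keys = ["down_blocks.0.weight", "down_blocks.0.bias"]
mapping = dict(zip(src.keys(), dst_keys))
remapped = {new: src.pop(old) for old, new in mapping.items()}
print(sorted(remapped))  # ['down_blocks.0.bias', 'down_blocks.0.weight']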
274
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __lowerCamelCase ( __a :List[str] , __a :List[Any] , __a :Union[str, Any] , __a :List[Any] ) -> Dict: """simple docstring""" A__ = multiprocessing.Manager() A__ = manager.list() A__ = multiprocessing.Process(target=__a , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __lowerCamelCase ( __a :Optional[Any] , __a :Any , __a :List[Any] ) -> Union[str, Any]: """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil A__ = shutil.rmtree A__ = os.rmdir A__ = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: A__ = {} with swallow_io(): with time_limit(__a ): exec(__a , __a ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(F'failed: {e}' ) # Needed for cleaning up. A__ = rmtree A__ = rmdir A__ = chdir @contextlib.contextmanager def __lowerCamelCase ( __a :List[str] ) -> Dict: """simple docstring""" def signal_handler(__a :List[Any] , __a :Optional[Any] ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , __a ) signal.signal(signal.SIGALRM , __a ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = WriteOnlyStringIO() with contextlib.redirect_stdout(__a ): with contextlib.redirect_stderr(__a ): with redirect_stdin(__a ): yield @contextlib.contextmanager def __lowerCamelCase ( ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(__a ): yield dirname class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A (io.StringIO ): '''simple docstring''' def a_ ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ) -> Dict: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[int] ) -> str: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Any ) -> int: """simple docstring""" raise OSError def a_ ( self : str , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> int: """simple docstring""" return False class A (contextlib._RedirectStream ): # type: ignore '''simple docstring''' __lowerCamelCase : Union[str, Any] = '''stdin''' @contextlib.contextmanager def __lowerCamelCase ( __a :Union[str, Any] ) -> List[str]: """simple docstring""" if root == ".": yield return A__ = os.getcwd() os.chdir(__a ) try: yield except BaseException as exc: raise exc finally: os.chdir(__a ) def __lowerCamelCase ( __a :Union[str, Any]=None ) -> Dict: """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , 
(maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins A__ = None A__ = None import os A__ = """1""" A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None import shutil A__ = None A__ = None A__ = None import subprocess A__ = None # type: ignore A__ = None import sys A__ = None A__ = None A__ = None A__ = None A__ = None
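# The timeout pattern used above, in miniature (a sketch, not this module's
# API): run untrusted work in a child process, give it a deadline, then kill it.
import multiprocessing
import time


def _worker(result):
    time.sleep(10)  # stands in for exec() of an untrusted program
    result.append("passed")


if __name__ == "__main__":
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=_worker, args=(result,))
    p.start()
    p.join(timeout=1)
    if p.is_alive():
        p.kill()
    print(list(result) or ["timed out"])  # ['timed out']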
import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class A : '''simple docstring''' @staticmethod def a_ ( *__lowerCAmelCase : int , **__lowerCAmelCase : str ) -> Union[str, Any]: """simple docstring""" pass def __lowerCamelCase ( __a :Image ) -> str: """simple docstring""" A__ = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class A (unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Tuple = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def a_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ) -> List[str]: """simple docstring""" A__ = DepthEstimationPipeline(model=__lowerCAmelCase , image_processor=__lowerCAmelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def a_ ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> str: """simple docstring""" A__ = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , __lowerCAmelCase ) import datasets A__ = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" ) A__ = depth_estimator( [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] ) self.assertEqual( [ {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )}, ] , __lowerCAmelCase , ) @require_tf @unittest.skip("""Depth estimation is not implemented in TF""" ) def a_ ( self : Any ) -> List[str]: """simple docstring""" pass @slow @require_torch def a_ ( self : List[Any] ) -> List[str]: """simple docstring""" A__ = """Intel/dpt-large""" A__ = pipeline("""depth-estimation""" , model=__lowerCAmelCase ) A__ = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" ) A__ = hashimage(outputs["""depth"""] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 2_9.3_0_4 ) self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.6_6_2 ) @require_torch def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
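# A hedged usage sketch of the pipeline exercised by the slow test above: the
# checkpoint and image URL are copied from the test, and each result is a dict
# with a "predicted_depth" tensor and a "depth" PIL image. Running this needs
# network access plus the torch and vision extras.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

print(type(outputs["depth"]))            # <class 'PIL.Image.Image'>
print(outputs["predicted_depth"].shape)  # per-pixel depth tensor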
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: A : Tuple = None A : Optional[Any] = logging.get_logger(__name__) A : Tuple = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A : List[str] = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } A : List[str] = { '''albert-base-v1''': 5_1_2, '''albert-large-v1''': 5_1_2, '''albert-xlarge-v1''': 5_1_2, '''albert-xxlarge-v1''': 5_1_2, '''albert-base-v2''': 5_1_2, '''albert-large-v2''': 5_1_2, '''albert-xlarge-v2''': 5_1_2, '''albert-xxlarge-v2''': 5_1_2, } A : Optional[int] = '''▁''' class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : str = VOCAB_FILES_NAMES __lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : List[str] = AlbertTokenizer def __init__( self : Tuple , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : str=False , __lowerCAmelCase : Union[str, Any]="[CLS]" , __lowerCAmelCase : int="[SEP]" , __lowerCAmelCase : Dict="<unk>" , __lowerCAmelCase : Dict="[SEP]" , __lowerCAmelCase : Union[str, Any]="<pad>" , __lowerCAmelCase : str="[CLS]" , __lowerCAmelCase : int="[MASK]" , **__lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" A__ = ( AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else mask_token ) super().__init__( __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , 
keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = False if not self.vocab_file else True def a_ ( self : List[Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a_ ( self : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__lowerCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( __lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ): copyfile(self.vocab_file , __lowerCAmelCase ) return (out_vocab_file,)
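# A small illustration of the special-token layout implemented by the two
# methods above, using placeholder ids (CLS=2, SEP=3) instead of a real
# tokenizer: single sequences become [CLS] A [SEP], pairs become
# [CLS] A [SEP] B [SEP], and the token type ids are 0 for everything up to and
# including the first separator, then 1 for the second segment.
CLS, SEP = [2], [3]


def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return CLS + ids_a + SEP
    return CLS + ids_a + SEP + ids_b + SEP


def token_type_ids(ids_a, ids_b=None):
    if ids_b is None:
        return len(CLS + ids_a + SEP) * [0]
    return len(CLS + ids_a + SEP) * [0] + len(ids_b + SEP) * [1]


print(build_inputs([10, 11], [12]))    # [2, 10, 11, 3, 12, 3]
print(token_type_ids([10, 11], [12]))  # [0, 0, 0, 0, 1, 1]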
from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Tuple = '''EncodecFeatureExtractor''' __lowerCamelCase : List[str] = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ) -> List[str]: """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) A__ = self.feature_extractor A__ = False def a_ ( self : Any , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=True ) -> List[str]: """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase ) def __call__( self : Union[str, Any] , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : int ) -> str: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) A__ = kwargs.pop("""audio""" , __lowerCAmelCase ) A__ = kwargs.pop("""sampling_rate""" , __lowerCAmelCase ) A__ = kwargs.pop("""text""" , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: A__ = args[0] A__ = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if text is not None: A__ = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if audio is not None: A__ = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: A__ = audio_inputs["""input_values"""] if "padding_mask" in audio_inputs: A__ = audio_inputs["""padding_mask"""] return inputs def a_ ( self : List[Any] , *__lowerCAmelCase : Tuple , **__lowerCAmelCase : int ) -> Any: """simple docstring""" A__ = kwargs.pop("""audio""" , __lowerCAmelCase ) A__ = kwargs.pop("""padding_mask""" , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: A__ = args[0] A__ = args[1:] if audio_values is not None: return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase ) else: return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : int , *__lowerCAmelCase : int , **__lowerCAmelCase : Dict ) -> Any: """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional = None ) -> List[np.ndarray]: """simple docstring""" A__ = to_numpy(__lowerCAmelCase ) A__ , A__ , A__ = audio_values.shape if padding_mask is None: return list(__lowerCAmelCase ) A__ = to_numpy(__lowerCAmelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) A__ = seq_len - padding_mask.shape[-1] A__ = 1 - self.feature_extractor.padding_value A__ = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase ) A__ = audio_values.tolist() for i in range(__lowerCAmelCase ): A__ = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] A__ = sliced_audio.reshape(__lowerCAmelCase , -1 ) return audio_values
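# A numpy-only sketch of the padding-mask trick in `_decode_audio` above: the
# mask is right-padded with the *non*-padding value so freshly generated
# samples survive, then each batch item is sliced back to its true length.
# The shapes and padding value (0) here are assumptions for the demo.
import numpy as np

padding_value = 0
audio_values = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (batch, channels, seq_len)
padding_mask = np.array([[1, 1, 1, 1], [1, 1, 0, 0]])            # shorter than seq_len

difference = audio_values.shape[-1] - padding_mask.shape[-1]
padding_mask = np.pad(
    padding_mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value
)

trimmed = [
    audio_values[i][padding_mask[i][None, :] != padding_value].reshape(1, -1)
    for i in range(audio_values.shape[0])
]
print([a.shape for a in trimmed])  # [(1, 6), (1, 4)]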
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A : Dict = logging.get_logger(__name__) def __lowerCamelCase ( __a :int=None , __a :Optional[Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__a ) @dataclass class A : '''simple docstring''' __lowerCamelCase : List[str] = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) __lowerCamelCase : List[int] = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) __lowerCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) __lowerCamelCase : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) __lowerCamelCase : str = field( default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) __lowerCamelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" warnings.warn( f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , __lowerCAmelCase , ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def a_ ( self : Tuple ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def a_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
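# A minimal sketch of the `list_field` helper defined at the top of this file:
# dataclasses reject mutable defaults, so a `default_factory` closure is used
# instead. Note that the closure returns the *same* list object on every call,
# so instances still share it; that matches the helper as written here.
from dataclasses import dataclass, field


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class DemoArgs:
    batch_sizes: list = list_field(default=[8], metadata={"help": "batch sizes to benchmark"})


a, b = DemoArgs(), DemoArgs()
print(a.batch_sizes)                   # [8]
print(a.batch_sizes is b.batch_sizes)  # True: one shared default list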
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging A : int = '''\ ''' A : Optional[int] = ''' Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity ''' A : List[str] = ''' Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to \'cuda\' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"] >>> results = perplexity.compute(model_id=\'gpt2\', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 78.22 >>> print(round(results["perplexities"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = datasets.load_dataset("wikitext", ... "wikitext-2-raw-v1", ... split="test")["text"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=\'\'] >>> results = perplexity.compute(model_id=\'gpt2\', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 60.35 >>> print(round(results["perplexities"][0], 2)) 81.12 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A (datasets.Metric ): '''simple docstring''' def a_ ( self : Union[str, Any] ) -> Any: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """input_texts""": datasets.Value("""string""" ), } ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , ) def a_ ( self : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int = 16 , __lowerCAmelCase : bool = True , __lowerCAmelCase : List[Any]=None ) -> str: """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": A__ = """cuda""" else: A__ = """cuda""" if torch.cuda.is_available() else """cpu""" A__ = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase ) A__ = model.to(__lowerCAmelCase ) A__ = AutoTokenizer.from_pretrained(__lowerCAmelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: A__ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__lowerCAmelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" A__ = model.config.max_length - 1 else: A__ = model.config.max_length A__ = tokenizer( __lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors="""pt""" , return_attention_mask=__lowerCAmelCase , ).to(__lowerCAmelCase ) A__ = encodings["""input_ids"""] A__ = encodings["""attention_mask"""] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." A__ = [] A__ = CrossEntropyLoss(reduction="""none""" ) for start_index in logging.tqdm(range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ) ): A__ = min(start_index + batch_size , len(__lowerCAmelCase ) ) A__ = encoded_texts[start_index:end_index] A__ = attn_masks[start_index:end_index] if add_start_token: A__ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__lowerCAmelCase ) A__ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) A__ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__lowerCAmelCase ), attn_mask] , dim=1 ) A__ = encoded_batch with torch.no_grad(): A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ).logits A__ = out_logits[..., :-1, :].contiguous() A__ = labels[..., 1:].contiguous() A__ = attn_mask[..., 1:].contiguous() A__ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __lowerCAmelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__lowerCAmelCase )}
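# A compact sketch of the per-sequence perplexity computed in the loop above,
# using tiny random tensors in place of real model output and the standard
# e-based definition: exponentiate the attention-mask-weighted mean
# cross-entropy over shifted logits and labels.
import torch
from torch.nn import CrossEntropyLoss

torch.manual_seed(0)
logits = torch.randn(1, 5, 100)           # (batch, seq_len, vocab)
labels = torch.randint(0, 100, (1, 5))
attn_mask = torch.ones(1, 5)

loss_fct = CrossEntropyLoss(reduction="none")
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
shift_mask = attn_mask[..., 1:].contiguous()

nll = (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_mask).sum(1) / shift_mask.sum(1)
print(torch.exp(nll))  # per-sequence perplexity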
from math import ceil def __lowerCamelCase ( __a :int = 1_0_0_1 ) -> int: """simple docstring""" A__ = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): A__ = 2 * i + 1 A__ = 2 * i A__ = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: A : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number''')
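# A worked check of the ring formula used above: ring i of the spiral is a
# (2i+1) x (2i+1) layer whose four corners are (2i+1)**2, (2i+1)**2 - 2i,
# (2i+1)**2 - 4i and (2i+1)**2 - 6i, summing to 4*odd**2 - 6*even with
# odd = 2i + 1 and even = 2i. For the 5x5 spiral this gives
# 1 + (4*9 - 12) + (4*25 - 24) = 1 + 24 + 76 = 101, matching the hand sum
# 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101.
total = 1
for i in range(1, 3):  # the two rings of a 5x5 spiral
    odd, even = 2 * i + 1, 2 * i
    total += 4 * odd**2 - 6 * even
assert total == 101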
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class A (unittest.TestCase ): '''simple docstring''' __lowerCamelCase : Optional[int] = ViTImageProcessor if is_vision_available() else None @property def a_ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self : Any ) -> List[str]: """simple docstring""" A__ = (3, 32, 1_28) A__ = tempfile.mkdtemp() # fmt: off A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on A__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + """\n""" ) A__ = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 1_28}, } A__ = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Optional[Any] , **__lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : Optional[Any] , **__lowerCAmelCase : int ) -> Optional[int]: """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : int ) -> int: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a_ ( self : Union[str, Any] ) -> int: """simple docstring""" A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta ) A__ = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) return image_input def a_ ( self : Optional[Any] ) -> int: """simple docstring""" A__ = self.get_tokenizer() A__ = self.get_image_processor() A__ = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" A__ = self.get_tokenizer() A__ = self.get_image_processor() A__ = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) A__ = 
self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) A__ = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def a_ ( self : str ) -> List[str]: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = self.prepare_image_inputs() A__ = image_processor(__lowerCAmelCase , return_tensors="""np""" ) A__ = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a_ ( self : Union[str, Any] ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """test""" A__ = processor(text=__lowerCAmelCase ) A__ = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a_ ( self : List[str] ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """test""" A__ = self.prepare_image_inputs() A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def a_ ( self : int ) -> Union[str, Any]: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.char_decode(__lowerCAmelCase ) A__ = tokenizer.batch_decode(__lowerCAmelCase ) A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Dict ) -> int: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = None A__ = self.prepare_image_inputs() A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def a_ ( self : List[str] ) -> List[Any]: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = torch.randn(1 , 27 , 38 ) A__ = torch.randn(1 , 27 , 5_02_57 ) A__ = torch.randn(1 , 27 , 3_05_22 ) A__ = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) A : Tuple = logging.getLogger(__name__) def __lowerCamelCase ( __a :Optional[int] , __a :List[str] ) -> Tuple: """simple docstring""" A__ = np.argmax(__a , axis=1 ) return np.sum(outputs == labels ) def __lowerCamelCase ( __a :Tuple ) -> Dict: """simple docstring""" with open(__a , encoding="""utf_8""" ) as f: A__ = csv.reader(__a ) A__ = [] next(__a ) # skip the first line for line in tqdm(__a ): output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def __lowerCamelCase ( __a :Optional[int] , __a :List[Any] , __a :Dict , __a :Optional[Any] , __a :Optional[Any] , __a :int ) -> Union[str, Any]: """simple docstring""" A__ = [] for dataset in encoded_datasets: A__ = len(__a ) A__ = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) A__ = np.zeros((n_batch, 2) , dtype=np.intaa ) A__ = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa ) A__ = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(__a ): A__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] A__ = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] A__ = with_conta A__ = with_conta A__ = len(__a ) - 1 A__ = len(__a ) - 1 A__ = with_conta A__ = with_conta A__ = mc_label A__ = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(__a ) for t in all_inputs ) ) return tensor_datasets def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = argparse.ArgumentParser() parser.add_argument("""--model_name""" , type=__a , default="""openai-gpt""" , help="""pretrained model name""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" ) parser.add_argument( """--output_dir""" , default=__a , type=__a , required=__a , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument("""--train_dataset""" , type=__a , default="""""" ) parser.add_argument("""--eval_dataset""" , type=__a , default="""""" ) parser.add_argument("""--seed""" , type=__a , default=4_2 ) parser.add_argument("""--num_train_epochs""" , type=__a , default=3 ) parser.add_argument("""--train_batch_size""" , type=__a , default=8 ) parser.add_argument("""--eval_batch_size""" , type=__a , default=1_6 ) parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__a , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , type=__a , default=1 ) parser.add_argument( """--max_steps""" , default=-1 , type=__a , help=( """If > 0: set total number of training steps to perform. 
Override num_train_epochs.""" ) , ) parser.add_argument( """--gradient_accumulation_steps""" , type=__a , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--learning_rate""" , type=__a , default=6.25E-5 ) parser.add_argument("""--warmup_steps""" , default=0 , type=__a , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--lr_schedule""" , type=__a , default="""warmup_linear""" ) parser.add_argument("""--weight_decay""" , type=__a , default=0.01 ) parser.add_argument("""--lm_coef""" , type=__a , default=0.9 ) parser.add_argument("""--n_valid""" , type=__a , default=3_7_4 ) parser.add_argument("""--server_ip""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=__a , default="""""" , help="""Can be used for distant debugging.""" ) A__ = parser.parse_args() print(__a ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__a ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) A__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) A__ = torch.cuda.device_count() logger.info("""device: {}, n_gpu {}""".format(__a , __a ) ) if not args.do_train and not args.do_eval: raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset A__ = ["""_start_""", """_delimiter_""", """_classify_"""] A__ = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(__a ) A__ = tokenizer.convert_tokens_to_ids(__a ) A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(__a ) ) model.to(__a ) # Load and encode the datasets def tokenize_and_encode(__a :Tuple ): if isinstance(__a , __a ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__a ) ) elif isinstance(__a , __a ): return obj return [tokenize_and_encode(__a ) for o in obj] logger.info("""Encoding dataset...""" ) A__ = load_rocstories_dataset(args.train_dataset ) A__ = load_rocstories_dataset(args.eval_dataset ) A__ = (train_dataset, eval_dataset) A__ = tokenize_and_encode(__a ) # Compute the max input length for the Transformer A__ = model.config.n_positions // 2 - 2 A__ = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) A__ = min(__a , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders A__ = pre_process_datasets(__a , __a , __a , *__a ) A__ , A__ = tensor_datasets[0], tensor_datasets[1] A__ = TensorDataset(*__a ) A__ = RandomSampler(__a ) A__ = DataLoader(__a , sampler=__a , batch_size=args.train_batch_size ) A__ = TensorDataset(*__a ) A__ = SequentialSampler(__a ) A__ = DataLoader(__a , sampler=__a , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: A__ = args.max_steps A__ = args.max_steps // (len(__a 
) // args.gradient_accumulation_steps) + 1 else: A__ = len(__a ) // args.gradient_accumulation_steps * args.num_train_epochs A__ = list(model.named_parameters() ) A__ = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""] A__ = [ { """params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], """weight_decay""": args.weight_decay, }, {"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0}, ] A__ = AdamW(__a , lr=args.learning_rate , eps=args.adam_epsilon ) A__ = get_linear_schedule_with_warmup( __a , num_warmup_steps=args.warmup_steps , num_training_steps=__a ) if args.do_train: A__ , A__ , A__ = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ): A__ = 0 A__ = 0 A__ = tqdm(__a , desc="""Training""" ) for step, batch in enumerate(__a ): A__ = tuple(t.to(__a ) for t in batch ) A__ , A__ , A__ , A__ = batch A__ = model(__a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a ) A__ = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() A__ = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 A__ = """Training loss: {:.2e} lr: {:.2e}""".format(__a , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer A__ = model.module if hasattr(__a , """module""" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` A__ = os.path.join(args.output_dir , __a ) A__ = os.path.join(args.output_dir , __a ) torch.save(model_to_save.state_dict() , __a ) model_to_save.config.to_json_file(__a ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned A__ = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) A__ = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(__a ) if args.do_eval: model.eval() A__ , A__ = 0, 0 A__ , A__ = 0, 0 for batch in tqdm(__a , desc="""Evaluating""" ): A__ = tuple(t.to(__a ) for t in batch ) A__ , A__ , A__ , A__ = batch with torch.no_grad(): A__ , A__ , A__ , A__ = model( __a , mc_token_ids=__a , lm_labels=__a , mc_labels=__a ) A__ = mc_logits.detach().cpu().numpy() A__ = mc_labels.to("""cpu""" ).numpy() A__ = accuracy(__a , __a ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 A__ = eval_loss / nb_eval_steps A__ = eval_accuracy / nb_eval_examples A__ = tr_loss / nb_tr_steps if args.do_train else None A__ = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss} A__ = os.path.join(args.output_dir , """eval_results.txt""" ) with open(__a , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , __a , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) if __name__ == "__main__": main()
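# A schematic of the per-example layout built by the preprocessing function in
# this script, with toy token ids (start=0, delimiter=1, classify=2): each
# RocStories example yields two candidate sequences, one per continuation, and
# the multiple-choice head reads the hidden state at the final classify token,
# whose index is recorded in mc_token_ids.
start, delim, clf = 0, 1, 2
story, cont_a, cont_b = [10, 11], [20], [21]

with_cont_a = [start] + story + [delim] + cont_a + [clf]
with_cont_b = [start] + story + [delim] + cont_b + [clf]
mc_token_ids = (len(with_cont_a) - 1, len(with_cont_b) - 1)  # positions of `clf`

print(with_cont_a)   # [0, 10, 11, 1, 20, 2]
print(mc_token_ids)  # (5, 5)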
def __lowerCamelCase ( __a :int = 1_0_0_0_0_0_0 ) -> int: """simple docstring""" A__ = limit + 1 A__ = [0] * limit for first_term in range(1 , __a ): for n in range(__a , __a , __a ): A__ = first_term + n / first_term if common_difference % 4: # d must be divisible by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d, also a<4d A__ = sum(1 for x in frequency[1:limit] if x == 1_0 ) return count if __name__ == "__main__": print(F'''{solution() = }''')
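# Why the loop above works (Project Euler 135): take an arithmetic progression
# x = a + d, y = a, z = a - d with a = first_term. Then
# x**2 - y**2 - z**2 = 4*a*d - a**2 = a*(4*d - a) = n, so
# d = (a + n / a) / 4 must be a positive integer, which is exactly the
# "divisible by 4" test, while z > 0 forces a > d and n > 0 forces a < 4*d.
a, d = 5, 2
x, y, z = a + d, a, a - d
n = x**2 - y**2 - z**2
assert n == a * (4 * d - a) == 15
assert (a + n / a) / 4 == d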
import argparse from collections import defaultdict import yaml A : str = '''docs/source/en/_toctree.yml''' def __lowerCamelCase ( __a :str ) -> List[Any]: """simple docstring""" A__ = defaultdict(__a ) A__ = [] A__ = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} ) else: new_doc_list.append(__a ) A__ = new_doc_list A__ = [key for key, value in counts.items() if value > 1] A__ = [] for duplicate_key in duplicates: A__ = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} ) if len(__a ) > 1: raise ValueError( F'{duplicate_key} is present several times in the documentation table of content at ' """`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """ """others.""" ) # Only add this once new_doc.append({"""local""": duplicate_key, """title""": titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] ) A__ = sorted(__a , key=lambda __a : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__a ) > 1: raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" ) overview_doc.extend(__a ) # Sort return overview_doc def __lowerCamelCase ( __a :Any=False ) -> List[str]: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 A__ = api_doc[scheduler_idx]["""sections"""] A__ = clean_doc_toc(__a ) A__ = False if new_scheduler_doc != scheduler_doc: A__ = True if overwrite: A__ = new_scheduler_doc if diff: if overwrite: A__ = api_doc with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) def __lowerCamelCase ( __a :Optional[int]=False ) -> Dict: """simple docstring""" with open(__a , encoding="""utf-8""" ) as f: A__ = yaml.safe_load(f.read() ) # Get to the API doc A__ = 0 while content[api_idx]["title"] != "API": api_idx += 1 A__ = content[api_idx]["""sections"""] # Then to the model doc A__ = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 A__ = False A__ = api_doc[pipeline_idx]["""sections"""] A__ = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: A__ = pipeline_doc["""section"""] A__ = clean_doc_toc(__a ) if overwrite: A__ = new_sub_pipeline_doc new_pipeline_docs.append(__a ) # sort overall pipeline doc A__ = clean_doc_toc(__a ) if new_pipeline_docs != pipeline_docs: A__ = True if overwrite: A__ = new_pipeline_docs if diff: if overwrite: A__ = api_doc with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(yaml.dump(__a , allow_unicode=__a ) ) else: raise ValueError( """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" ) if __name__ == "__main__": A : Tuple = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') A : Optional[Any] = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) 
check_pipeline_doc(args.fix_and_overwrite)
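# A toy, self-contained rewrite of the dedupe-and-sort behaviour of
# `clean_doc_toc` above (the function name and the simplified dedupe are
# assumptions for the demo): entries sharing a "local" key collapse to one,
# the rest are sorted by lowercased title, and "Overview" is hoisted first.
def toy_clean(doc_list):
    overview, rest, seen = [], [], set()
    for doc in doc_list:
        if doc["title"].lower() == "overview":
            overview.append(doc)
        elif doc["local"] not in seen:
            seen.add(doc["local"])
            rest.append(doc)
    return overview + sorted(rest, key=lambda d: d["title"].lower())


docs = [
    {"local": "pndm", "title": "PNDM"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "overview", "title": "Overview"},
]
print([d["local"] for d in toy_clean(docs)])  # ['overview', 'ddim', 'pndm']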
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def __lowerCamelCase ( *__a :str ) -> Tuple: """simple docstring""" with open(__a , """r""" ) as fh: fcntl.flock(__a , fcntl.LOCK_EX ) try: print(*__a ) finally: fcntl.flock(__a , fcntl.LOCK_UN ) A : Optional[int] = int(os.environ['''LOCAL_RANK''']) torch.cuda.set_device(local_rank) A : Dict = torch.device('''cuda''', local_rank) A : Optional[Any] = socket.gethostname() A : Dict = F'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group('''nccl''') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank A : Dict = dist.get_rank() A : Optional[Any] = dist.get_world_size() printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(F'''{gpu} is broken''') raise
def __lowerCamelCase ( __a :str ) -> list: """simple docstring""" A__ = [0] * len(__a ) for i in range(1 , len(__a ) ): # use last results for better performance - dynamic programming A__ = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: A__ = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 A__ = j return prefix_result def __lowerCamelCase ( __a :str ) -> int: """simple docstring""" return max(prefix_function(__a ) ) if __name__ == "__main__": import doctest doctest.testmod()
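# A worked example of the prefix function above: for s = "abcabcd" the array
# is [0, 0, 0, 1, 2, 3, 0]; position 5 records that "abc" is both a proper
# prefix and a suffix of "abcabc", so the longest reappearing prefix has
# length 3. The loop below repeats the dynamic-programming recurrence inline.
s = "abcabcd"
pi = [0] * len(s)
for i in range(1, len(s)):
    j = pi[i - 1]
    while j > 0 and s[i] != s[j]:
        j = pi[j - 1]  # fall back to the next-shorter border
    if s[i] == s[j]:
        j += 1
    pi[i] = j
assert pi == [0, 0, 0, 1, 2, 3, 0]
assert max(pi) == 3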
class lowercase_ : '''simple docstring''' def __init__( self : Dict ) ->List[str]: """simple docstring""" a = {} def __lowerCAmelCase ( self : str ) ->None: """simple docstring""" print(self.vertex ) for i in self.vertex: print(__UpperCAmelCase , ''' -> ''' , ''' -> '''.join([str(__UpperCAmelCase ) for j in self.vertex[i]] ) ) def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ) ->None: """simple docstring""" if from_vertex in self.vertex: self.vertex[from_vertex].append(__UpperCAmelCase ) else: # else make a new vertex a = [to_vertex] def __lowerCAmelCase ( self : Optional[Any] ) ->None: """simple docstring""" a = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__UpperCAmelCase , __UpperCAmelCase ) def __lowerCAmelCase ( self : str , __UpperCAmelCase : int , __UpperCAmelCase : list ) ->None: """simple docstring""" a = True print(__UpperCAmelCase , end=''' ''' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__UpperCAmelCase , __UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase__ = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("DFS:") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_: Optional[Any] ={ 'configuration_x_clip': [ 'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XCLIPConfig', 'XCLIPTextConfig', 'XCLIPVisionConfig', ], 'processing_x_clip': ['XCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: str =[ 'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'XCLIPModel', 'XCLIPPreTrainedModel', 'XCLIPTextModel', 'XCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys SCREAMING_SNAKE_CASE_: List[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
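# A minimal sketch of the lazy-import pattern behind `_LazyModule` above
# (simplified: the real class also handles submodule access, __dir__ and
# pickling): the module object registered in sys.modules is a proxy whose
# __getattr__ performs the import on first attribute access.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module  # attribute name -> module path

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache, so the import runs only once
        return value


lazy = LazyModule("demo", {"sqrt": "math"})
print(lazy.sqrt(9.0))  # `math` is imported only at this point -> 3.0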
class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A : '''simple docstring''' def __init__( self : List[Any] ) -> str: """simple docstring""" A__ = [ [], [], [], ] def a_ ( self : Dict , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: """simple docstring""" try: if len(self.queues[priority] ) >= 1_00: raise OverflowError("""Maximum queue size is 100""" ) self.queues[priority].append(__lowerCAmelCase ) except IndexError: raise ValueError("""Valid priorities are 0, 1, and 2""" ) def a_ ( self : Optional[Any] ) -> int: """simple docstring""" for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError("""All queues are empty""" ) def __str__( self : Tuple ) -> str: """simple docstring""" return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) ) class A : '''simple docstring''' def __init__( self : int ) -> str: """simple docstring""" A__ = [] def a_ ( self : int , __lowerCAmelCase : int ) -> None: """simple docstring""" if len(self.queue ) == 1_00: raise OverFlowError("""Maximum queue size is 100""" ) self.queue.append(__lowerCAmelCase ) def a_ ( self : List[str] ) -> int: """simple docstring""" if not self.queue: raise UnderFlowError("""The queue is empty""" ) else: A__ = min(self.queue ) self.queue.remove(__lowerCAmelCase ) return data def __str__( self : List[Any] ) -> str: """simple docstring""" return str(self.queue ) def __lowerCamelCase ( ) -> Optional[Any]: """simple docstring""" A__ = FixedPriorityQueue() fpq.enqueue(0 , 1_0 ) fpq.enqueue(1 , 7_0 ) fpq.enqueue(0 , 1_0_0 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 6_4 ) fpq.enqueue(0 , 1_2_8 ) print(__a ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(__a ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def __lowerCamelCase ( ) -> int: """simple docstring""" A__ = ElementPriorityQueue() epq.enqueue(1_0 ) epq.enqueue(7_0 ) epq.enqueue(1_0_0 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(6_4 ) epq.enqueue(1_2_8 ) print(__a ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(__a ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate

hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_filesafailed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_filesafailed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
2
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = tempfile.mkdtemp() # fmt: off A__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on A__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) ) A__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] A__ = {"""unk_token""": """<unk>"""} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__lowerCAmelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(__lowerCAmelCase ) ) A__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], """image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } A__ = os.path.join(self.tmpdirname , __lowerCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Tuple , **__lowerCAmelCase : Dict ) -> str: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : Union[str, Any] , **__lowerCAmelCase : Dict ) -> List[str]: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : List[str] , **__lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ) def a_ ( self : str ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a_ ( self : str ) -> Any: """simple docstring""" A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a_ ( self : Optional[int] ) -> Tuple: """simple docstring""" A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = self.get_image_processor() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase ) A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) 
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase ) def a_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) A__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) A__ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def a_ ( self : List[Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = self.prepare_image_inputs() A__ = image_processor(__lowerCAmelCase , return_tensors="""np""" ) A__ = processor(images=__lowerCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def a_ ( self : Optional[Any] ) -> Any: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = processor(text=__lowerCAmelCase ) A__ = tokenizer(__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def a_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def a_ ( self : Tuple ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.batch_decode(__lowerCAmelCase ) A__ = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def a_ ( self : Optional[int] ) -> str: """simple docstring""" A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) A__ = """lower newer""" A__ = self.prepare_image_inputs() A__ = 
processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
274
0
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = IFPipeline __magic_name__ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS __magic_name__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return self._get_dummy_components() def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Optional[int]: """simple docstring""" if str(SCREAMING_SNAKE_CASE ).startswith('''mps''' ): A : str = torch.manual_seed(SCREAMING_SNAKE_CASE ) else: A : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE ) A : Any = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1e-1 ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" self._test_save_load_local() def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) A : str = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) A, A : Optional[Any] = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() A : List[Any] = None A : List[Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() 
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img A : Optional[Any] = IFImgaImgPipeline(**pipe_a.components ) A : Union[str, Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting A : str = IFInpaintingPipeline(**pipe_a.components ) A : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _start_torch_memory_measurement() A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE , output_type='''np''' , ) A : Any = output.images[0] assert image.shape == (64, 64, 3) A : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 A : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # pipeline 2 _start_torch_memory_measurement() A : str = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ) A : List[str] = output.images[0] assert image.shape == (256, 256, 3) A : Tuple = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _start_torch_memory_measurement() A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Any = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE , output_type='''np''' , ) A : List[str] = output.images[0] assert image.shape == (64, 64, 3) A : Any = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 A : Union[str, Any] = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # pipeline 2 _start_torch_memory_measurement() A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Tuple = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , original_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ) A : Any = output.images[0] assert image.shape == (256, 256, 3) A : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" _start_torch_memory_measurement() A : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : List[Any] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE , output_type='''np''' , ) A : Any = output.images[0] assert image.shape == (64, 64, 3) A : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 A : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # pipeline 2 _start_torch_memory_measurement() A : int = torch.Generator(device='''cpu''' ).manual_seed(0 ) A : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE ) A : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE ) A : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , original_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , ) A : Optional[int] = output.images[0] assert image.shape == (256, 256, 3) A : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A : Any = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
3
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """
    position: index of this process in the (virtual) line of processes
    value:    this process's current value
    l_send / r_send: pipes used to send a value to the left / right neighbor
    lr_cv / rr_cv:   pipes used to receive a value from the left / right neighbor
    result_pipe:     pipe used to send the final value back to the main process
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
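The parallel pipeline above is easier to follow next to the plain single-process odd-even transposition sort, which makes the invariant explicit: after n alternating even/odd phases the list is sorted. This is a minimal illustrative sketch, not part of the dataset row; the function name is ours.

def odd_even_transposition_sequential(arr: list[int]) -> list[int]:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        start = phase % 2
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))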
274
0
'''simple docstring''' import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class UpperCAmelCase_ ( __lowercase ): lowerCamelCase : str = (KDPMaDiscreteScheduler,) lowerCamelCase : Optional[int] = 10 def __UpperCAmelCase ( self : List[Any] , **UpperCAmelCase__ : Optional[int] ) -> Any: lowerCAmelCase = { 'num_train_timesteps': 1_1_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', } config.update(**UpperCAmelCase__ ) return config def __UpperCAmelCase ( self : List[str] ) -> Tuple: for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Optional[Any] ) -> int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase__ ) def __UpperCAmelCase ( self : int ) -> Dict: lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' ) lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase = sample.to(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2 assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2 assert abs(result_mean.item() - 0.0_002 ) < 1E-3 def __UpperCAmelCase ( self : Tuple ) -> Dict: if torch_device == "mps": return lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase = sample.to(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 def __UpperCAmelCase ( self : List[Any] ) -> Optional[Any]: if 
torch_device == "mps": return lowerCAmelCase = self.scheduler_classes[0] lowerCAmelCase = self.get_scheduler_config() lowerCAmelCase = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase__ ) lowerCAmelCase = self.dummy_model() lowerCAmelCase = self.dummy_sample_deter.to(UpperCAmelCase__ ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase = scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = model(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = output.prev_sample lowerCAmelCase = torch.sum(torch.abs(UpperCAmelCase__ ) ) lowerCAmelCase = torch.mean(torch.abs(UpperCAmelCase__ ) ) if str(UpperCAmelCase__ ).startswith('cpu' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3 else: # CUDA assert abs(result_sum.item() - 20.4_125 ) < 1E-2 assert abs(result_mean.item() - 0.0_266 ) < 1E-3
4
import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
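The make_linear_from_emb step above is a weight-tying trick: the Linear's weight tensor is overwritten with the embedding matrix (the (vocab_size, emb_size) constructor arguments are immediately discarded by that overwrite), so the resulting head computes hidden @ emb.weight.T. Below is a small illustrative sketch of that behavior under those assumptions; the names are ours, not from the conversion script.

import torch
from torch import nn


def tied_head_demo() -> None:
    vocab_size, emb_size = 8, 4
    emb = nn.Embedding(vocab_size, emb_size)

    # mirror make_linear_from_emb: overwrite the Linear's weight with the
    # embedding matrix, making the layer an emb_size -> vocab_size projection
    head = nn.Linear(vocab_size, emb_size, bias=False)
    head.weight.data = emb.weight.data  # shape (vocab_size, emb_size)

    hidden = torch.randn(1, emb_size)
    logits = head(hidden)
    assert logits.shape == (1, vocab_size)
    assert torch.allclose(logits, hidden @ emb.weight.T)


tied_head_demo()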
274
0
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
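The DP table above can be cross-checked against the direct recursive definition of the minimum multiplication cost; for the classic [30, 35, 15, 5, 10, 20, 25] input both give 15125. This is an illustrative cross-check, not part of the dataset row, and the names are ours.

from functools import lru_cache


def min_chain_cost(dims: list[int]) -> int:
    @lru_cache(maxsize=None)
    def cost(i: int, j: int) -> int:
        # cost of multiplying matrices A_i..A_j, where A_k is dims[k-1] x dims[k]
        if i == j:
            return 0
        return min(
            cost(i, k) + cost(k + 1, j) + dims[i - 1] * dims[k] * dims[j]
            for k in range(i, j)
        )

    return cost(1, len(dims) - 1)


# matches matrix[1][n - 1] computed by matrix_chain_order above
assert min_chain_cost([30, 35, 15, 5, 10, 20, 25]) == 15125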
5
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class A (unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : Tuple=4 , ) -> Dict: """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_attention_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_choices def a_ ( self : Any ) -> str: """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_attention_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = config_and_inputs A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class A (SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : str = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def a_ ( self : str ) -> Optional[int]: """simple docstring""" A__ = FlaxAlbertModelTester(self ) @slow def a_ ( self : int ) -> Tuple: """simple docstring""" for 
model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained("""albert-base-v2""" ) A__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCAmelCase ) @require_flax class A (unittest.TestCase ): '''simple docstring''' @slow def a_ ( self : Dict ) -> List[Any]: """simple docstring""" A__ = FlaxAlbertModel.from_pretrained("""albert-base-v2""" ) A__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) A__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] A__ = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCAmelCase ) A__ = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
274
0
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
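The backtracking generator above emits the k-combinations of 1..n in lexicographic order, which is the same order itertools.combinations produces, so the standard library gives a cheap reference implementation. Illustrative sketch only, not part of the dataset row; the names are ours.

from itertools import combinations


def reference_combinations(n: int, k: int) -> list[list[int]]:
    return [list(c) for c in combinations(range(1, n + 1), k)]


# same output as generate_all_combinations(4, 2)
assert reference_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]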
6
from sklearn.metrics import fa_score import datasets A : Any = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' A : List[Any] = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. ])} ''' A : List[Any] = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A (datasets.Metric ): '''simple docstring''' def a_ ( self : Optional[int] ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def a_ ( self : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Any="binary" , __lowerCAmelCase : Optional[int]=None ) -> List[Any]: """simple docstring""" A__ = fa_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase ) return {"f1": float(__lowerCAmelCase ) if score.size == 1 else score}
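For the binary case, the wrapped sklearn call above reduces to the textbook precision/recall computation; writing it out by hand reproduces the docstring's first example. This is an illustrative sketch, not part of the dataset row, and the function name is ours.

def binary_f1(references: list[int], predictions: list[int]) -> float:
    tp = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 1)
    fp = sum(1 for r, p in zip(references, predictions) if r == 0 and p == 1)
    fn = sum(1 for r, p in zip(references, predictions) if r == 1 and p == 0)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0


# matches the first docstring example: {'f1': 0.5}
assert binary_f1([0, 1, 0, 1, 0], [0, 0, 1, 1, 0]) == 0.5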
274
0
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.g4dn.xlarge', 'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9}, }, { 'framework': 'tensorflow', 'script': 'run_tf.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.g4dn.xlarge', 'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9}, }, ] ) class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : List[str] )-> List[str]: '''simple docstring''' if self.framework == "pytorch": subprocess.run( F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),encoding='utf-8',check=lowercase_,) assert hasattr(self,'env' ) def snake_case__ ( self : int,lowercase_ : Optional[Any]=1 )-> Optional[int]: '''simple docstring''' return HuggingFace( entry_point=self.script,source_dir=self.env.test_path,role=self.env.role,image_uri=self.env.image_uri,base_job_name=F'{self.env.base_job_name}-single',instance_count=lowercase_,instance_type=self.instance_type,debugger_hook_config=lowercase_,hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path},metric_definitions=self.env.metric_definitions,py_version='py36',) def snake_case__ ( self : List[Any],lowercase_ : List[Any] )-> Optional[int]: '''simple docstring''' TrainingJobAnalytics(lowercase_ ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' ) def snake_case__ ( self : Any )-> Optional[int]: '''simple docstring''' A__ = self.create_estimator() # run training estimator.fit() # result dataframe A__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis A__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] ) A__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping A__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds',9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy ) assert all(t <= self.results['eval_loss'] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'{estimator.latest_training_job.name}.json','w' ) as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss},lowercase_ )
7
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Union[str, Any] = logging.get_logger(__name__) A : int = { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json''' ), } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Any = '''xlm-roberta''' def __init__( self : Optional[Any] , __lowerCAmelCase : List[Any]=3_05_22 , __lowerCAmelCase : int=7_68 , __lowerCAmelCase : Tuple=12 , __lowerCAmelCase : str=12 , __lowerCAmelCase : Union[str, Any]=30_72 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-12 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : str=0 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Tuple="absolute" , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=None , **__lowerCAmelCase : str , ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase ) A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = layer_norm_eps A__ = position_embedding_type A__ = use_cache A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": A__ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: A__ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
274
0
from math import pi


def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
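Worked example for the formula above: a 90-degree arc of radius 10 is a quarter of the full circumference, 2·pi·10/4 = 5·pi ≈ 15.708, which is what arc_length(90, 10) prints. A minimal check:

from math import isclose, pi

assert isclose(2 * pi * 10 * (90 / 360), 5 * pi)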
8
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean A : str = 0 A : Any = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] A : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right A : Union[str, Any] = tuple[int, int] class A : '''simple docstring''' def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Node | None , ) -> None: """simple docstring""" A__ = pos_x A__ = pos_y A__ = (pos_y, pos_x) A__ = goal_x A__ = goal_y A__ = g_cost A__ = parent A__ = self.calculate_heuristic() A__ = self.g_cost + self.h_cost def a_ ( self : Dict ) -> float: """simple docstring""" A__ = self.pos_x - self.goal_x A__ = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(__lowerCAmelCase ) + abs(__lowerCAmelCase ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self : int , __lowerCAmelCase : Node ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class A : '''simple docstring''' def __init__( self : Union[str, Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> Tuple: """simple docstring""" A__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCAmelCase ) A__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __lowerCAmelCase ) A__ = [self.start] A__ = [] A__ = False def a_ ( self : List[str] ) -> list[TPosition]: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A__ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(__lowerCAmelCase ) self.closed_nodes.append(__lowerCAmelCase ) A__ = self.get_successors(__lowerCAmelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__lowerCAmelCase ) else: # retrieve the best current path A__ = self.open_nodes.pop(self.open_nodes.index(__lowerCAmelCase ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__lowerCAmelCase ) else: self.open_nodes.append(__lowerCAmelCase ) return [self.start.pos] def a_ ( self : Optional[Any] , __lowerCAmelCase : Node ) -> list[Node]: """simple docstring""" A__ = [] for action in delta: A__ = parent.pos_x + action[1] A__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCAmelCase , ) ) return successors def a_ ( self : List[Any] , __lowerCAmelCase : Node | None ) -> list[TPosition]: """simple docstring""" A__ = node A__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A__ = current_node.parent path.reverse() return path class A : '''simple docstring''' def __init__( self : Optional[Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> None: """simple docstring""" A__ = AStar(__lowerCAmelCase , __lowerCAmelCase ) A__ = AStar(__lowerCAmelCase , __lowerCAmelCase ) A__ = False def a_ ( self : int ) -> list[TPosition]: """simple docstring""" while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() 
self.bwd_astar.open_nodes.sort() A__ = self.fwd_astar.open_nodes.pop(0 ) A__ = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( __lowerCAmelCase , __lowerCAmelCase ) self.fwd_astar.closed_nodes.append(__lowerCAmelCase ) self.bwd_astar.closed_nodes.append(__lowerCAmelCase ) A__ = current_bwd_node A__ = current_fwd_node A__ = { self.fwd_astar: self.fwd_astar.get_successors(__lowerCAmelCase ), self.bwd_astar: self.bwd_astar.get_successors(__lowerCAmelCase ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(__lowerCAmelCase ) else: # retrieve the best current path A__ = astar.open_nodes.pop( astar.open_nodes.index(__lowerCAmelCase ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(__lowerCAmelCase ) else: astar.open_nodes.append(__lowerCAmelCase ) return [self.fwd_astar.start.pos] def a_ ( self : List[str] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ) -> list[TPosition]: """simple docstring""" A__ = self.fwd_astar.retrace_path(__lowerCAmelCase ) A__ = self.bwd_astar.retrace_path(__lowerCAmelCase ) bwd_path.pop() bwd_path.reverse() A__ = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] A : Optional[int] = (0, 0) A : int = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) A : Dict = time.time() A : Optional[Any] = AStar(init, goal) A : Optional[int] = a_star.search() A : Optional[int] = time.time() - start_time print(F'''AStar execution time = {end_time:f} seconds''') A : Dict = time.time() A : Tuple = BidirectionalAStar(init, goal) A : List[Any] = time.time() - bd_start_time print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
274
0
import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class _lowercase ( unittest.TestCase ): '''simple docstring''' @slow def __magic_name__( self :str ) -> int: for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = FlaxAutoModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def __magic_name__( self :Dict ) -> Dict: for model_name in ["roberta-base", "roberta-large"]: with self.subTest(lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = FlaxAutoModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow def __magic_name__( self :Optional[int] ) -> List[Any]: for model_name in ["bert-base-cased", "bert-large-uncased"]: __SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = FlaxBertModel.from_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**lowerCAmelCase__ :List[Any] ): return model(**lowerCAmelCase__ ) eval(**lowerCAmelCase__ ).block_until_ready() @slow def __magic_name__( self :List[str] ) -> Optional[int]: for model_name in ["roberta-base", "roberta-large"]: __SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = FlaxRobertaModel.from_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**lowerCAmelCase__ :List[Any] ): return model(**lowerCAmelCase__ ) eval(**lowerCAmelCase__ ).block_until_ready() def __magic_name__( self :Dict ) -> Union[str, Any]: with self.assertRaisesRegex( lowerCAmelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxAutoModel.from_pretrained('''bert-base''' ) def __magic_name__( self :int ) -> Optional[int]: with self.assertRaisesRegex( lowerCAmelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): __SCREAMING_SNAKE_CASE : Optional[int] = FlaxAutoModel.from_pretrained(lowerCAmelCase__ , revision='''aaaaaa''' ) def __magic_name__( self :str ) -> Tuple: with self.assertRaisesRegex( lowerCAmelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): __SCREAMING_SNAKE_CASE : str = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __magic_name__( self :Dict ) -> int: 
with self.assertRaisesRegex(lowerCAmelCase__ , '''Use `from_pt=True` to load this model''' ): __SCREAMING_SNAKE_CASE : Dict = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
9
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class A (unittest.TestCase ): '''simple docstring''' def a_ ( self : Any ) -> Union[str, Any]: """simple docstring""" A__ = ["""a""", """b""", """c"""] # Defaults to last layer if both are None A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""c"""] ) self.assertEqual(__lowerCAmelCase , [2] ) # Out indices set to match out features A__ , A__ = get_aligned_output_features_output_indices(["""a""", """c"""] , __lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [0, 2] ) # Out features set to match out indices A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [0, 2] , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [0, 2] ) # Out features selected from negative indices A__ , A__ = get_aligned_output_features_output_indices(__lowerCAmelCase , [-3, -1] , __lowerCAmelCase ) self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] ) self.assertEqual(__lowerCAmelCase , [-3, -1] ) def a_ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , __lowerCAmelCase ) # Out features must be a list with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] ) # Out features must be a subset of stage names with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] ) # Out indices must be a list or tuple with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(__lowerCAmelCase , 0 , ["""a""", """b"""] ) # Out indices must be a subset of stage names with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(__lowerCAmelCase , (0, 1) , ["""a"""] ) # Out features and out indices must be the same length with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] ) # Out features should match out indices with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] ) # Out features and out indices should be in order with self.assertRaises(__lowerCAmelCase ): verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] ) # Check passes with valid inputs verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" A__ = BackboneMixin() A__ = ["""a""", """b""", """c"""] A__ = ["""a""", """c"""] A__ = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly A__ = ["""a""", """b"""] self.assertEqual(backbone.out_features , ["""a""", """b"""] ) self.assertEqual(backbone.out_indices , [0, 1] ) A__ = [-3, -1] self.assertEqual(backbone.out_features , ["""a""", """c"""] ) self.assertEqual(backbone.out_indices , [-3, -1] )
274
0
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fill the smaller signal with zeros to make both signals the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # round off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
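A quick cross-check of the class above: circular convolution in the time domain equals pointwise multiplication of DFTs, so NumPy's FFT should reproduce the matrix result for the default signals.

import numpy as np

a = np.array([2, 1, 2, -1])
b = np.array([1, 2, 3, 4])
fft_result = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
print(np.round(fft_result, 2))  # [10. 10.  6. 14.], matching CircularConvolution().circular_convolution()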
10
from collections import deque class A : '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None: """simple docstring""" A__ = process_name # process name A__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time A__ = arrival_time A__ = burst_time # remaining burst time A__ = 0 # total time of the process wait in ready queue A__ = 0 # time from arrival time to completion time class A : '''simple docstring''' def __init__( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int , ) -> None: """simple docstring""" A__ = number_of_queues # time slice of queues that round robin algorithm applied A__ = time_slices # unfinished process is in this ready_queue A__ = queue # current time A__ = current_time # finished process is in this sequence queue A__ = deque() def a_ ( self : Dict ) -> list[str]: """simple docstring""" A__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def a_ ( self : Tuple , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def a_ ( self : Optional[Any] , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def a_ ( self : Dict , __lowerCAmelCase : list[Process] ) -> list[int]: """simple docstring""" A__ = [] for i in range(len(__lowerCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def a_ ( self : int , __lowerCAmelCase : deque[Process] ) -> list[int]: """simple docstring""" return [q.burst_time for q in queue] def a_ ( self : Any , __lowerCAmelCase : Process ) -> int: """simple docstring""" process.waiting_time += self.current_time - process.stop_time return process.waiting_time def a_ ( self : Union[str, Any] , __lowerCAmelCase : deque[Process] ) -> deque[Process]: """simple docstring""" A__ = deque() # sequence deque of finished process while len(__lowerCAmelCase ) != 0: A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__lowerCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 A__ = 0 # set the process's turnaround time because it is finished A__ = self.current_time - cp.arrival_time # set the completion time A__ = self.current_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def a_ ( self : Optional[Any] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int ) -> tuple[deque[Process], deque[Process]]: """simple docstring""" A__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(__lowerCAmelCase ) ): A__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if 
self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__lowerCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time A__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__lowerCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished A__ = 0 # set the finish time A__ = self.current_time # update the process' turnaround time because it is finished A__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__lowerCAmelCase ) self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def a_ ( self : List[Any] ) -> deque[Process]: """simple docstring""" for i in range(self.number_of_queues - 1 ): A__ , A__ = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest A : Union[str, Any] = Process('''P1''', 0, 5_3) A : Optional[Any] = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : int = Process('''P4''', 0, 2_4) A : Any = 3 A : List[Any] = [1_7, 2_5] A : Optional[Any] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) A : Optional[Any] = Process('''P1''', 0, 5_3) A : int = Process('''P2''', 0, 1_7) A : Optional[int] = Process('''P3''', 0, 6_8) A : Tuple = Process('''P4''', 0, 2_4) A : Union[str, Any] = 3 A : Optional[Any] = [1_7, 2_5] A : Tuple = deque([Pa, Pa, Pa, Pa]) A : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0) A : Dict = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( F'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( F'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( F'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
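For reference, a minimal driver for the scheduler above. This is a sketch assuming de-obfuscated names (the classes appear as `Process` and `MLFQ` in the demo block) with four distinct process objects P1..P4, using the constructor order name, arrival time, burst time:

from collections import deque

P1, P2, P3, P4 = Process("P1", 0, 53), Process("P2", 0, 17), Process("P3", 0, 68), Process("P4", 0, 24)
mlfq = MLFQ(number_of_queues=3, time_slices=[17, 25], queue=deque([P1, P2, P3, P4]), current_time=0)
mlfq.multi_level_feedback_queue()
print(mlfq.calculate_sequence_of_finish_queue())          # completion order of the four processes
print(MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4]))  # per-process time spent waiting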
274
0
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) with the explicit (forward) Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
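As a worked check of the Euler routine above: for dy/dx = y with y(0) = 1, the numerical solution should approach e at x = 1 as the step size shrinks, since the method is first-order accurate.

y = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(y[-1])  # ~2.7048 with h = 0.01; converges to e = 2.71828... as h -> 0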
11
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType A : str = logging.get_logger(__name__) A : Union[str, Any] = { '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''', } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Optional[Any] = '''layoutlmv3''' def __init__( self : Tuple , __lowerCAmelCase : Optional[int]=5_02_65 , __lowerCAmelCase : Tuple=7_68 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : Any=12 , __lowerCAmelCase : List[Any]=30_72 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : List[Any]=0.1 , __lowerCAmelCase : Dict=5_12 , __lowerCAmelCase : Any=2 , __lowerCAmelCase : Dict=0.0_2 , __lowerCAmelCase : List[str]=1e-5 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : int=0 , __lowerCAmelCase : Dict=2 , __lowerCAmelCase : Tuple=10_24 , __lowerCAmelCase : List[str]=1_28 , __lowerCAmelCase : Optional[int]=1_28 , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Any=1_28 , __lowerCAmelCase : str=64 , __lowerCAmelCase : Optional[int]=2_56 , __lowerCAmelCase : int=True , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=2_24 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[Any]=16 , __lowerCAmelCase : Dict=None , **__lowerCAmelCase : Optional[Any] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=__lowerCAmelCase , hidden_size=__lowerCAmelCase , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , intermediate_size=__lowerCAmelCase , hidden_act=__lowerCAmelCase , hidden_dropout_prob=__lowerCAmelCase , attention_probs_dropout_prob=__lowerCAmelCase , max_position_embeddings=__lowerCAmelCase , type_vocab_size=__lowerCAmelCase , initializer_range=__lowerCAmelCase , layer_norm_eps=__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = max_ad_position_embeddings A__ = coordinate_size A__ = shape_size A__ = has_relative_attention_bias A__ = rel_pos_bins A__ = max_rel_pos A__ = has_spatial_attention_bias A__ = rel_ad_pos_bins A__ = max_rel_ad_pos A__ = text_embed A__ = visual_embed A__ = input_size A__ = num_channels A__ = patch_size A__ = classifier_dropout class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : List[str] = version.parse('''1.12''' ) @property def a_ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) else: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""bbox""", {0: """batch""", 1: """sequence"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels"""}), ] ) @property def a_ ( self : Optional[int] ) -> 
float: """simple docstring""" return 1e-5 @property def a_ ( self : Tuple ) -> int: """simple docstring""" return 12 def a_ ( self : str , __lowerCAmelCase : "ProcessorMixin" , __lowerCAmelCase : int = -1 , __lowerCAmelCase : int = -1 , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional["TensorType"] = None , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 40 , __lowerCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , """apply_ocr""" , __lowerCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A__ = processor.tokenizer.num_special_tokens_to_add(__lowerCAmelCase ) A__ = compute_effective_axis_dimension( __lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowerCAmelCase ) # Generate dummy inputs according to compute batch and sequence A__ = [[""" """.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes A__ = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) A__ = self._generate_dummy_images(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) A__ = dict( processor( __lowerCAmelCase , text=__lowerCAmelCase , boxes=__lowerCAmelCase , return_tensors=__lowerCAmelCase , ) ) return inputs
274
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
12
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : str = '''▁''' A : Any = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } A : List[Any] = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } A : Tuple = { '''facebook/m2m100_418M''': 1_0_2_4, } # fmt: off A : Optional[int] = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = ['''input_ids''', '''attention_mask'''] __lowerCamelCase : List[int] = [] __lowerCamelCase : List[int] = [] def __init__( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]="<s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : Any="<unk>" , __lowerCAmelCase : Any="m2m100" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , __lowerCAmelCase : Dict=8 , **__lowerCAmelCase : Tuple , ) -> None: """simple docstring""" A__ = {} if sp_model_kwargs is None else sp_model_kwargs A__ = language_codes A__ = FAIRSEQ_LANGUAGE_CODES[language_codes] A__ = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code} A__ 
= kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__lowerCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = vocab_file A__ = load_json(__lowerCAmelCase ) A__ = {v: k for k, v in self.encoder.items()} A__ = spm_file A__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs ) A__ = len(self.encoder ) A__ = { self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase ) } A__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )} A__ = {v: k for k, v in self.lang_token_to_id.items()} A__ = src_lang if src_lang is not None else """en""" A__ = tgt_lang A__ = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) A__ = num_madeup_words @property def a_ ( self : Optional[int] ) -> int: """simple docstring""" return len(self.encoder ) + len(self.lang_token_to_id ) @property def a_ ( self : Optional[Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def a_ ( self : List[Any] , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a_ ( self : Optional[int] , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def a_ ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] ) def a_ ( self : Optional[int] , __lowerCAmelCase : int ) -> str: """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__lowerCAmelCase , self.unk_token ) def a_ ( self : Optional[int] , __lowerCAmelCase : Dict ) -> str: """simple docstring""" A__ = [] A__ = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token A__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def a_ ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) A__ = [1] * len(self.prefix_tokens ) A__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones def a_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + 
self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self : int ) -> Dict: """simple docstring""" A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : str , __lowerCAmelCase : Dict ) -> None: """simple docstring""" A__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A__ = {} A__ = load_spm(self.spm_file , self.sp_model_kwargs ) def a_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" A__ = Path(__lowerCAmelCase ) if not save_dir.is_dir(): raise OSError(f'{save_directory} should be a directory' ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __lowerCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCAmelCase , """wb""" ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (str(__lowerCAmelCase ), str(__lowerCAmelCase )) def a_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "en" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "ro" , **__lowerCAmelCase : List[Any] , ) -> BatchEncoding: """simple docstring""" A__ = src_lang A__ = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : Tuple ) -> Tuple: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) A__ = src_lang A__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase ) A__ = self.get_lang_id(__lowerCAmelCase ) A__ = tgt_lang_id return inputs def a_ ( self : Dict ) -> int: """simple docstring""" self.set_src_lang_special_tokens(self.src_lang ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang ) def a_ ( self : str , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Tuple , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> str: """simple docstring""" return self.lang_code_to_token[lang] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> int: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) return self.lang_token_to_id[lang_token] def __lowerCamelCase ( __a :str , 
__a :Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" A__ = sentencepiece.SentencePieceProcessor(**__a ) spm.Load(str(__a ) ) return spm def __lowerCamelCase ( __a :str ) -> Union[Dict, List]: """simple docstring""" with open(__a , """r""" ) as f: return json.load(__a ) def __lowerCamelCase ( __a :List[Any] , __a :str ) -> None: """simple docstring""" with open(__a , """w""" ) as f: json.dump(__a , __a , indent=2 )
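Typical use of the tokenizer defined above: set a source and target language so that `set_src_lang_special_tokens` prepends the language token, then use `get_lang_id` to force the first generated token.

from transformers import M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model_inputs = tokenizer("Hello world", return_tensors="pt")
# input_ids begin with the __en__ language token and end with </s>
print(tokenizer.convert_ids_to_tokens(model_inputs["input_ids"][0]))
print(tokenizer.get_lang_id("fr"))  # pass as forced_bos_token_id when generating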
274
0
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class __lowercase ( UpperCAmelCase_ ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: Optional[Any] = pa.array(TypedSequence([1, 2, 3])) self.assertEqual(arr.type , pa.intaa()) def _SCREAMING_SNAKE_CASE ( self : str): with self.assertRaises(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: Optional[int] = pa.array(TypedSequence([1, 2, 3]) , type=pa.intaa()) def _SCREAMING_SNAKE_CASE ( self : Dict): with self.assertRaises(lowerCAmelCase__): SCREAMING_SNAKE_CASE_: Dict = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool") , type=Value("int64"))) def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Dict = pa.array(TypedSequence([1, 2, 3] , type=Value("int32"))) self.assertEqual(arr.type , pa.intaa()) def _SCREAMING_SNAKE_CASE ( self : str): with self.assertRaises((TypeError, pa.lib.ArrowInvalid)): SCREAMING_SNAKE_CASE_: List[str] = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64"))) def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: Optional[int] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32"))) self.assertEqual(arr.type , pa.intaa()) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_: Dict = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64"))) self.assertEqual(arr.type , pa.string()) def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: int = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64"))) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64")) def _SCREAMING_SNAKE_CASE ( self : str): with self.assertRaises((TypeError, pa.lib.ArrowInvalid)): SCREAMING_SNAKE_CASE_: List[str] = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64"))) def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[int] = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64"))) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64")) def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: List[Any] = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64"))) self.assertEqual(arr.type , pa.string()) @require_pil def _SCREAMING_SNAKE_CASE ( self : str): import PIL.Image SCREAMING_SNAKE_CASE_: Tuple = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta).reshape(2 , 5)) with patch( "datasets.arrow_writer.cast_to_python_objects" , side_effect=lowerCAmelCase__) as mock_cast_to_python_objects: SCREAMING_SNAKE_CASE_: Tuple = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image())) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("optimize_list_casting" , lowerCAmelCase__) self.assertFalse(kwargs["optimize_list_casting"]) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Dict = pa.BufferReader(_UpperCAmelCase ) if isinstance(_UpperCAmelCase , pa.Buffer ) else pa.memory_map(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: 
Optional[int] = pa.ipc.open_stream(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: pa.Table = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_: Optional[Any] = pa.schema(_UpperCAmelCase ) if fields else None with ArrowWriter(stream=_UpperCAmelCase , schema=_UpperCAmelCase , writer_batch_size=_UpperCAmelCase ) as writer: writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE_: str = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(_UpperCAmelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def A_ ( ): SCREAMING_SNAKE_CASE_: Optional[Any] = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_: int = Features({"labels": ClassLabel(names=["neg", "pos"] )} ) with ArrowWriter(stream=_UpperCAmelCase , features=_UpperCAmelCase ) as writer: writer.write({"labels": 0} ) writer.write({"labels": 1} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata SCREAMING_SNAKE_CASE_: Any = pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE_: Optional[int] = pa.ipc.open_stream(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: pa.Table = f.read_all() SCREAMING_SNAKE_CASE_: str = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(_UpperCAmelCase ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = pa.BufferOutputStream() with ArrowWriter( stream=_UpperCAmelCase , writer_batch_size=_UpperCAmelCase , hash_salt="split_name" , check_duplicates=_UpperCAmelCase , ) as writer: with pytest.raises(_UpperCAmelCase ): writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = writer.finalize() @pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] ) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Tuple = pa.BufferOutputStream() with ArrowWriter( stream=_UpperCAmelCase , writer_batch_size=_UpperCAmelCase , hash_salt="split_name" , check_duplicates=_UpperCAmelCase , ) as writer: with pytest.raises(_UpperCAmelCase ): writer.write({"col_1": "foo", "col_2": 1} , key=10 ) writer.write({"col_1": "bar", "col_2": 2} , key=10 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = writer.finalize() @pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] ) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: int = pa.BufferOutputStream() with ArrowWriter( stream=_UpperCAmelCase , writer_batch_size=_UpperCAmelCase , hash_salt="split_name" , check_duplicates=_UpperCAmelCase , ) as writer: writer.write({"col_1": "foo", "col_2": 1} , key=1 ) 
writer.write({"col_1": "bar", "col_2": 2} , key=2 ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Tuple = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_: List[Any] = pa.schema(_UpperCAmelCase ) if fields else None with ArrowWriter(stream=_UpperCAmelCase , schema=_UpperCAmelCase , writer_batch_size=_UpperCAmelCase ) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) writer.write_batch({"col_1": [], "col_2": []} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE_: List[str] = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(_UpperCAmelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_: str = pa.schema(_UpperCAmelCase ) if fields else None with ArrowWriter(stream=_UpperCAmelCase , schema=_UpperCAmelCase , writer_batch_size=_UpperCAmelCase ) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE_: Dict = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(_UpperCAmelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Tuple = pa.BufferOutputStream() SCREAMING_SNAKE_CASE_: Any = pa.schema(_UpperCAmelCase ) if fields else None with ArrowWriter(stream=_UpperCAmelCase , schema=_UpperCAmelCase , writer_batch_size=_UpperCAmelCase ) as writer: writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) ) writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: SCREAMING_SNAKE_CASE_: List[str] = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(_UpperCAmelCase , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def A_ ( ): with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_: Optional[int] = {"col_1": pa.string(), "col_2": pa.intaa()} SCREAMING_SNAKE_CASE_: Any = os.path.join(_UpperCAmelCase , "test.arrow" 
) with ArrowWriter(path=_UpperCAmelCase , schema=pa.schema(_UpperCAmelCase ) ) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(_UpperCAmelCase , metadata=writer._schema.metadata ) _check_output(_UpperCAmelCase , 1 ) def A_ ( _UpperCAmelCase ): if pa.types.is_list(_UpperCAmelCase ): return get_base_dtype(arr_type.value_type ) else: return arr_type def A_ ( _UpperCAmelCase , _UpperCAmelCase ): if isinstance(lst[0] , _UpperCAmelCase ): change_first_primitive_element_in_list(lst[0] , _UpperCAmelCase ) else: SCREAMING_SNAKE_CASE_: Optional[int] = value @pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] ) @pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Tuple = pa.array(TypedSequence(_UpperCAmelCase , optimized_int_type=_UpperCAmelCase ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( "col, expected_dtype" , [ ("attention_mask", pa.inta()), ("special_tokens_mask", pa.inta()), ("token_type_ids", pa.inta()), ("input_ids", pa.intaa()), ("other", pa.intaa()), ] , ) @pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): # in range SCREAMING_SNAKE_CASE_: List[str] = pa.array(OptimizedTypedSequence(_UpperCAmelCase , col=_UpperCAmelCase ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications SCREAMING_SNAKE_CASE_: Optional[int] = copy.deepcopy(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: str = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(_UpperCAmelCase , _UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = pa.array(OptimizedTypedSequence(_UpperCAmelCase , col=_UpperCAmelCase ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("raise_exception" , [False, True] ) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = str(tmp_path / "dataset-train.arrow" ) try: with ArrowWriter(path=_UpperCAmelCase ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = "mock://dataset-train.arrow" with ArrowWriter(path=_UpperCAmelCase , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(_UpperCAmelCase ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(_UpperCAmelCase ) def A_ ( ): SCREAMING_SNAKE_CASE_: List[str] = pa.BufferOutputStream() with ParquetWriter(stream=_UpperCAmelCase ) as writer: writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = writer.finalize() assert num_examples == 2 assert num_bytes > 0 SCREAMING_SNAKE_CASE_: int = pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE_: pa.Table = pq.read_table(_UpperCAmelCase ) assert pa_table.to_pydict() == 
{"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("embed_local_files" , [False, True] ) def A_ ( _UpperCAmelCase , _UpperCAmelCase ): import PIL.Image SCREAMING_SNAKE_CASE_: Any = str(tmp_path / "test_image_rgb.jpg" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_UpperCAmelCase , format="png" ) SCREAMING_SNAKE_CASE_: Any = pa.BufferOutputStream() with ParquetWriter( stream=_UpperCAmelCase , features=Features({"image": Image()} ) , embed_local_files=_UpperCAmelCase ) as writer: writer.write({"image": image_path} ) writer.finalize() SCREAMING_SNAKE_CASE_: Union[str, Any] = pa.BufferReader(output.getvalue() ) SCREAMING_SNAKE_CASE_: pa.Table = pq.read_table(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = pa_table.to_pydict() if embed_local_files: assert isinstance(out["image"][0]["path"] , _UpperCAmelCase ) with open(_UpperCAmelCase , "rb" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def A_ ( ): SCREAMING_SNAKE_CASE_: Dict = pa.schema([pa.field("col_1" , pa.string() , nullable=_UpperCAmelCase )] ) SCREAMING_SNAKE_CASE_: int = pa.BufferOutputStream() with ArrowWriter(stream=_UpperCAmelCase ) as writer: writer._build_writer(inferred_schema=_UpperCAmelCase ) assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
13
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the Game of Life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
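The blinker is the easiest correctness check for new_generation above: a vertical bar of three live cells flips to a horizontal bar and back, with period 2.

generation_1 = new_generation(BLINKER)
print(generation_1)                  # [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
print(new_generation(generation_1))  # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]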
274
0
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the number of vertices on the longest path in a DAG (Kahn's algorithm)."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
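Hand-checking the run above: long_dist[v] counts the vertices on the longest path ending at v, and the longest chain in the sample DAG is 0 -> 2 -> 5 -> 6 -> 7 (equivalently 0 -> 3 -> 5 -> 6 -> 7), five vertices, so the call prints 5. A smaller sanity check:

longest_distance({0: [1], 1: [2], 2: []})  # a 3-vertex chain prints 3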
14
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
274
0
import math


class Graph:
    def __init__(self, n: int = 0) -> None:  # a graph with nodes 0, 1, ..., n - 1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(n)] for i in range(n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(n)] for i in range(n)]

    def add_edge(self, u: int, v: int, w: int) -> None:
        self.dp[u][v] = w

    def floyd_warshall(self) -> None:
        for k in range(self.n):
            for i in range(self.n):
                for j in range(self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u: int, v: int) -> float:
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
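The demo above calls show_min without printing, so nothing is displayed; a sketch that surfaces the all-pairs results (values checked by hand against the edge list):

graph = Graph(5)
edges = [(0, 2, 9), (0, 4, 10), (1, 3, 5), (2, 3, 7), (3, 0, 10),
         (3, 1, 2), (3, 2, 1), (3, 4, 6), (4, 1, 3), (4, 2, 4), (4, 3, 9)]
for u, v, w in edges:
    graph.add_edge(u, v, w)
graph.floyd_warshall()
print(graph.show_min(1, 4))  # 11: path 1 -> 3 -> 4 (5 + 6)
print(graph.show_min(0, 3))  # 16: path 0 -> 2 -> 3 (9 + 7)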
15
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __lowerCamelCase ( __a :List[str] , __a :List[Any] , __a :Union[str, Any] , __a :List[Any] ) -> Dict: """simple docstring""" A__ = multiprocessing.Manager() A__ = manager.list() A__ = multiprocessing.Process(target=__a , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __lowerCamelCase ( __a :Optional[Any] , __a :Any , __a :List[Any] ) -> Union[str, Any]: """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil A__ = shutil.rmtree A__ = os.rmdir A__ = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: A__ = {} with swallow_io(): with time_limit(__a ): exec(__a , __a ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(F'failed: {e}' ) # Needed for cleaning up. A__ = rmtree A__ = rmdir A__ = chdir @contextlib.contextmanager def __lowerCamelCase ( __a :List[str] ) -> Dict: """simple docstring""" def signal_handler(__a :List[Any] , __a :Optional[Any] ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , __a ) signal.signal(signal.SIGALRM , __a ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ = WriteOnlyStringIO() with contextlib.redirect_stdout(__a ): with contextlib.redirect_stderr(__a ): with redirect_stdin(__a ): yield @contextlib.contextmanager def __lowerCamelCase ( ) -> Dict: """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(__a ): yield dirname class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' pass class A (io.StringIO ): '''simple docstring''' def a_ ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : str ) -> Dict: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Optional[int] ) -> str: """simple docstring""" raise OSError def a_ ( self : Optional[Any] , *__lowerCAmelCase : Any , **__lowerCAmelCase : Any ) -> int: """simple docstring""" raise OSError def a_ ( self : str , *__lowerCAmelCase : Any , **__lowerCAmelCase : Union[str, Any] ) -> int: """simple docstring""" return False class A (contextlib._RedirectStream ): # type: ignore '''simple docstring''' __lowerCamelCase : Union[str, Any] = '''stdin''' @contextlib.contextmanager def __lowerCamelCase ( __a :Union[str, Any] ) -> List[str]: """simple docstring""" if root == ".": yield return A__ = os.getcwd() os.chdir(__a ) try: yield except BaseException as exc: raise exc finally: os.chdir(__a ) def __lowerCamelCase ( __a :Union[str, Any]=None ) -> Dict: """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , 
(maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins A__ = None A__ = None import os A__ = """1""" A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None A__ = None import shutil A__ = None A__ = None A__ = None import subprocess A__ = None # type: ignore A__ = None import sys A__ = None A__ = None A__ = None A__ = None A__ = None
274
0
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = None ) -> list[list[str]]: lowercase__ : List[str] = word_bank or [] # create a table lowercase__ : int = len(__lowerCamelCase ) + 1 lowercase__ : list[list[list[str]]] = [] for _ in range(__lowerCamelCase ): table.append([] ) # seed value lowercase__ : Union[str, Any] = [[]] # because empty string has empty combination # iterate through the indices for i in range(__lowerCamelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(__lowerCamelCase )] == word: lowercase__ : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(__lowerCamelCase )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(__lowerCamelCase )]: combination.reverse() return table[len(__lowerCamelCase )] if __name__ == "__main__": print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa'])) print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't'])) print( all_construct( 'hexagonosaurus', ['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'], ) )
16
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast ALBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs by adding special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token type ids: 0s over the first sequence (and its special tokens), 1s over the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
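# A minimal standalone usage sketch (run separately from the module above) of
# the special-token helpers defined in AlbertTokenizerFast. It assumes the
# public "albert-base-v2" checkpoint is available; variable names here are
# illustrative only.
from transformers import AlbertTokenizerFast

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("goodbye"))

# Pairs are packed as [CLS] A [SEP] B [SEP] ...
pair_ids = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
# ... with token type ids 0 over the first segment and 1 over the second.
type_ids = tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
assert len(pair_ids) == len(type_ids)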
274
0
"""simple docstring""" _a = 8.3_144_598 def _A ( UpperCamelCase_ : float, UpperCamelCase_ : float) -> float: '''simple docstring''' if temperature < 0: raise Exception("Temperature cannot be less than 0 K") if molar_mass <= 0: raise Exception("Molar mass cannot be less than or equal to 0 kg/mol") else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example _a = 3_00 _a = 28 _a = rms_speed_of_molecule(temperature, molar_mass) print(F"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
17
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    """
    BenchmarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.
    """

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].`"
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
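# A minimal standalone usage sketch: in transformers the concrete subclass of
# these arguments is PyTorchBenchmarkArguments, consumed by PyTorchBenchmark.
# This assumes an installed transformers version that still ships the
# (deprecated) benchmark utils; the model name below is a placeholder.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8, 32]
)
benchmark = PyTorchBenchmark(benchmark_args)
results = benchmark.run()  # speed/memory results per model, batch size and sequence length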
274
0
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
18
from math import ceil


def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
274
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
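# A sketch of how a parent CLI could wire this parser in as a subcommand, the
# way the `accelerate` entry point dispatches to `accelerate config`. The
# `root` parser and the argv list below are hypothetical:
#
#   root = argparse.ArgumentParser(prog="accelerate")
#   subcommands = root.add_subparsers()
#   config_command_parser(subparsers=subcommands)  # registers "config", sets func=config_command
#   args = root.parse_args(["config", "--config_file", "my_config.yaml"])
#   args.func(args)  # dispatches to config_command, which prompts interactively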
19
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
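# A sketch of the CSV layout load_rocstories_dataset() above expects, read off
# the parsing code: a header row, then per row an id, the four story sentences
# in columns 1-4, the two candidate endings in columns 5 and 6, and a 1-based
# gold label in the last column. File name and values are made up:
#
#   id,s1,s2,s3,s4,ending1,ending2,label
#   0,Tom was hungry.,He opened the fridge.,It was empty.,He grabbed his coat.,He bought food.,He slept.,1
#
# load_rocstories_dataset("rocstories_sample.csv") would then yield:
#   [("Tom was hungry. He opened the fridge. It was empty. He grabbed his coat.",
#     "He bought food.", "He slept.", 0)]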
274
0
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
20
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Removes duplicates from a toc section, sorts entries by title and pins "Overview" first."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
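# A small worked example of clean_doc_toc() above: duplicates are merged,
# entries are sorted by title, and "Overview" is pinned first. The toc entries
# below are made up for illustration:
#
#   toc = [
#       {"local": "pipelines/b", "title": "Beta"},
#       {"local": "overview", "title": "Overview"},
#       {"local": "pipelines/a", "title": "Alpha"},
#       {"local": "pipelines/a", "title": "Alpha"},
#   ]
#   clean_doc_toc(toc)
#   # [{'local': 'overview', 'title': 'Overview'},
#   #  {'local': 'pipelines/a', 'title': 'Alpha'},
#   #  {'local': 'pipelines/b', 'title': 'Beta'}]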
274
0