Dataset row schema:

  code                     string  (87 – 55.2k chars)
  code_codestyle           int64   (0 – 349)
  style_context            string  (135 – 49.1k chars)
  style_context_codestyle  int64   (0 – 349)
  label                    int64   (0 – 1)
code:
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {'''vocab_file''': '''spiece.model'''}

lowerCamelCase_ = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}

lowerCamelCase_ = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}

# Segments (not really needed)
lowerCamelCase_ = 0
lowerCamelCase_ = 1
lowerCamelCase_ = 2
lowerCamelCase_ = 3
lowerCamelCase_ = 4


class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    snake_case = VOCAB_FILES_NAMES
    snake_case = PRETRAINED_VOCAB_FILES_MAP
    snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case = '''left'''

    def __init__( self : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple=False , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : List[Any]="<s>" , __UpperCAmelCase : Optional[Any]="</s>" , __UpperCAmelCase : Dict="<unk>" , __UpperCAmelCase : Tuple="<sep>" , __UpperCAmelCase : List[str]="<pad>" , __UpperCAmelCase : int="<cls>" , __UpperCAmelCase : Dict="<mask>" , __UpperCAmelCase : Optional[Any]=["<eop>", "<eod>"] , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : List[Any] , ):
        '''simple docstring'''
        _A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token

        _A = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=__UpperCAmelCase ,
            remove_space=__UpperCAmelCase ,
            keep_accents=__UpperCAmelCase ,
            bos_token=__UpperCAmelCase ,
            eos_token=__UpperCAmelCase ,
            unk_token=__UpperCAmelCase ,
            sep_token=__UpperCAmelCase ,
            pad_token=__UpperCAmelCase ,
            cls_token=__UpperCAmelCase ,
            mask_token=__UpperCAmelCase ,
            additional_special_tokens=__UpperCAmelCase ,
            sp_model_kwargs=self.sp_model_kwargs ,
            **__UpperCAmelCase ,
        )

        _A = 3
        _A = do_lower_case
        _A = remove_space
        _A = keep_accents
        _A = vocab_file

        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__UpperCAmelCase )

    @property
    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        return len(self.sp_model )

    def lowerCAmelCase ( self : int ):
        '''simple docstring'''
        _A = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Union[str, Any] ):
        '''simple docstring'''
        _A = self.__dict__.copy()
        _A = None
        return state

    def __setstate__( self : Any , __UpperCAmelCase : str ):
        '''simple docstring'''
        _A = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            _A = {}

        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        if self.remove_space:
            _A = " ".join(inputs.strip().split() )
        else:
            _A = inputs
        _A = outputs.replace("``" , "\"" ).replace("''" , "\"" )

        if not self.keep_accents:
            _A = unicodedata.normalize("NFKD" , __UpperCAmelCase )
            _A = "".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
        if self.do_lower_case:
            _A = outputs.lower()

        return outputs

    def lowerCAmelCase ( self : str , __UpperCAmelCase : str ):
        '''simple docstring'''
        _A = self.preprocess_text(__UpperCAmelCase )
        _A = self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
        _A = []
        for piece in pieces:
            if len(__UpperCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                _A = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        _A = cur_pieces[1:]
                    else:
                        _A = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(__UpperCAmelCase )
            else:
                new_pieces.append(__UpperCAmelCase )

        return new_pieces

    def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        return self.sp_model.PieceToId(__UpperCAmelCase )

    def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[Any] ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(__UpperCAmelCase )

    def lowerCAmelCase ( self : str , __UpperCAmelCase : Dict ):
        '''simple docstring'''
        _A = "".join(__UpperCAmelCase ).replace(__UpperCAmelCase , " " ).strip()
        return out_string

    def lowerCAmelCase ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = None , __UpperCAmelCase : bool = True , **__UpperCAmelCase : List[Any] , ):
        '''simple docstring'''
        _A = kwargs.pop("use_source_tokenizer" , __UpperCAmelCase )

        _A = self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        _A = []
        _A = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
                    _A = []
                sub_texts.append(__UpperCAmelCase )
            else:
                current_sub_text.append(__UpperCAmelCase )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        _A = "".join(__UpperCAmelCase )

        _A = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            _A = self.clean_up_tokenization(__UpperCAmelCase )
            return clean_text
        else:
            return text

    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
        '''simple docstring'''
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )

        if token_ids_a is not None:
            return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
        return ([0] * len(__UpperCAmelCase )) + [1, 1]

    def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
        '''simple docstring'''
        _A = [self.sep_token_id]
        _A = [2]

        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(__UpperCAmelCase , "wb" ) as fi:
                _A = self.sp_model.serialized_model_proto()
                fi.write(__UpperCAmelCase )

        return (out_vocab_file,)
code_codestyle: 79
style_context:
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


lowerCamelCase_ = logging.get_logger(__name__)  # pylint: disable=invalid-name


class _UpperCAmelCase ( snake_case_ , snake_case_ ):
    """simple docstring"""

    @register_to_config
    def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ):
        '''simple docstring'''
        super().__init__()

        _A = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            _A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase )
        else:
            _A = None

        _A = torch.nn.Parameter(__UpperCAmelCase )


class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    snake_case = 42
    snake_case = 42
    snake_case = 42
    snake_case = 42
    snake_case = 42
    snake_case = 42

    def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ):
        '''simple docstring'''
        super().__init__()

        self.register_modules(
            vqvae=__UpperCAmelCase ,
            transformer=__UpperCAmelCase ,
            text_encoder=__UpperCAmelCase ,
            tokenizer=__UpperCAmelCase ,
            scheduler=__UpperCAmelCase ,
            learned_classifier_free_sampling_embeddings=__UpperCAmelCase ,
        )

    def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ):
        '''simple docstring'''
        _A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1

        # get prompt text embeddings
        _A = self.tokenizer(
            __UpperCAmelCase ,
            padding="max_length" ,
            max_length=self.tokenizer.model_max_length ,
            return_tensors="pt" ,
        )
        _A = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            _A = text_input_ids[:, : self.tokenizer.model_max_length]
        _A = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        _A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )

        # duplicate text embeddings for each generation per prompt
        _A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                _A = self.learned_classifier_free_sampling_embeddings.embeddings
                _A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 )
            else:
                _A = [""] * batch_size

                _A = text_input_ids.shape[-1]
                _A = self.tokenizer(
                    __UpperCAmelCase ,
                    padding="max_length" ,
                    max_length=__UpperCAmelCase ,
                    truncation=__UpperCAmelCase ,
                    return_tensors="pt" ,
                )
                _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                _A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _A = negative_prompt_embeds.shape[1]
            _A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 )
            _A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _A = torch.cat([negative_prompt_embeds, prompt_embeds] )

        return prompt_embeds

    @torch.no_grad()
    def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ):
        '''simple docstring'''
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            _A = 1
        elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            _A = len(__UpperCAmelCase )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' )

        _A = batch_size * num_images_per_prompt

        _A = guidance_scale > 1.0

        _A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(__UpperCAmelCase )}.''' )

        # get the initial completely masked latents unless the user supplied it
        _A = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            _A = self.transformer.num_vector_embeds - 1
            _A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            _A = latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device )

        _A = self.scheduler.timesteps.to(self.device )

        _A = latents

        for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
            # expand the sample if we are doing classifier free guidance
            _A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            _A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample

            if do_classifier_free_guidance:
                _A , _A = model_output.chunk(2 )
                _A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase )

            _A = self.truncate(__UpperCAmelCase , __UpperCAmelCase )

            # remove `log(0)`'s (`-inf`s)
            _A = model_output.clamp(-70 )

            # compute the previous noisy sample x_t -> x_t-1
            _A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

        _A = self.vqvae.config.vq_embed_dim
        _A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        _A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase )
        _A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample

        _A = (image / 2 + 0.5).clamp(0 , 1 )
        _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            _A = self.numpy_to_pil(__UpperCAmelCase )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=__UpperCAmelCase )

    def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ):
        '''simple docstring'''
        _A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase )
        _A = torch.exp(__UpperCAmelCase )
        _A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        _A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase )
        _A = torch.cat((all_true, keep_mask) , dim=1 )
        _A = keep_mask[:, :-1, :]

        _A = keep_mask.gather(1 , indices.argsort(1 ) )

        _A = log_p_x_0.clone()
        _A = -torch.inf  # -inf = log(0)

        return rv
style_context_codestyle: 79
label: 1
code:
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
code_codestyle: 79
style_context:
'''simple docstring'''
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import (
    BitConfig,
    ViTHybridConfig,
    ViTHybridForImageClassification,
    ViTHybridImageProcessor,
    ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)


def __lowercase ( __lowercase , __lowercase=False ) -> int:
    '''simple docstring'''
    _A = []

    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )

    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )

    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )

    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
            rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
        rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )

    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ] )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    # fmt: on

    return rename_keys


def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            _A = ""
        else:
            _A = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        _A = in_proj_weight[: config.hidden_size, :]
        _A = in_proj_bias[: config.hidden_size]
        _A = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        _A = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        _A = in_proj_weight[-config.hidden_size :, :]
        _A = in_proj_bias[-config.hidden_size :]


def __lowercase ( __lowercase ) -> List[str]:
    '''simple docstring'''
    _A = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(__lowercase , __lowercase )


def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple:
    '''simple docstring'''
    _A = dct.pop(__lowercase )
    _A = val


def __lowercase ( ) -> List[str]:
    '''simple docstring'''
    _A = "http://images.cocodataset.org/val2017/000000039769.jpg"
    _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
    return im


@torch.no_grad()
def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple:
    '''simple docstring'''
    _A = BitConfig(
        global_padding="same" ,
        layer_type="bottleneck" ,
        depths=(3, 4, 9) ,
        out_features=["stage3"] ,
        embedding_dynamic_padding=__lowercase ,
    )
    _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 )
    _A = False

    # load original model from timm
    _A = timm.create_model(__lowercase , pretrained=__lowercase )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    _A = timm_model.state_dict()
    if base_model:
        remove_classification_head_(__lowercase )
    _A = create_rename_keys(__lowercase , __lowercase )
    for src, dest in rename_keys:
        rename_key(__lowercase , __lowercase , __lowercase )
    read_in_q_k_v(__lowercase , __lowercase , __lowercase )

    _A = "huggingface/label-files"
    _A = "imagenet-1k-id2label.json"
    _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
    _A = {int(__lowercase ): v for k, v in idalabel.items()}
    _A = idalabel
    _A = {v: k for k, v in idalabel.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        _A = ViTHybridModel(__lowercase ).eval()
    else:
        _A = ViTHybridForImageClassification(__lowercase ).eval()
    model.load_state_dict(__lowercase )

    # create image processor
    _A = create_transform(**resolve_data_config({} , model=__lowercase ) )
    _A = transform.transforms

    _A = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    _A = ViTHybridImageProcessor(
        do_resize=__lowercase ,
        size={"shortest_edge": timm_transforms[0].size} ,
        resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,
        do_center_crop=__lowercase ,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,
        do_normalize=__lowercase ,
        image_mean=timm_transforms[-1].mean.tolist() ,
        image_std=timm_transforms[-1].std.tolist() ,
    )

    _A = prepare_img()
    _A = transform(__lowercase ).unsqueeze(0 )
    _A = processor(__lowercase , return_tensors="pt" ).pixel_values

    # verify pixel values
    assert torch.allclose(__lowercase , __lowercase )

    # verify logits
    with torch.no_grad():
        _A = model(__lowercase )
    _A = outputs.logits

    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        _A = timm_model.forward_features(__lowercase )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(__lowercase , outputs.pooler_output , atol=1e-3 )
    else:
        _A = timm_model(__lowercase )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        Path(__lowercase ).mkdir(exist_ok=__lowercase )
        print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__lowercase )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(__lowercase )

    if push_to_hub:
        print(F'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(F'''ybelkada/{vit_name}''' )
        processor.push_to_hub(F'''ybelkada/{vit_name}''' )


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--vit_name''',
        default='''vit_base_r50_s16_384''',
        type=str,
        help='''Name of the hybrid ViT timm model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
    )

    lowerCamelCase_ = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 79
label: 1
code:
'''simple docstring'''
def __lowercase ( __lowercase , __lowercase ) -> int:
    '''simple docstring'''
    return int((input_a, input_a).count(0 ) == 0 )


def __lowercase ( ) -> None:
    '''simple docstring'''
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
code_codestyle: 79
style_context:
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowerCamelCase_ = {
    '''configuration_time_series_transformer''': [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TimeSeriesTransformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = [
        '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimeSeriesTransformerForPrediction''',
        '''TimeSeriesTransformerModel''',
        '''TimeSeriesTransformerPreTrainedModel''',
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
style_context_codestyle: 79
label: 1
code:
'''simple docstring'''
class _UpperCAmelCase :
    """simple docstring"""

    def __init__( self : List[str] , __UpperCAmelCase : list[int] ):
        '''simple docstring'''
        _A = len(__UpperCAmelCase )
        _A = [0] * len_array

        if len_array > 0:
            _A = array[0]

        for i in range(1 , __UpperCAmelCase ):
            _A = self.prefix_sum[i - 1] + array[i]

    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ):
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ):
        '''simple docstring'''
        _A = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(__UpperCAmelCase )

        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 79
style_context:
'''simple docstring'''
import comet  # From: unbabel-comet
import torch

import datasets


lowerCamelCase_ = datasets.logging.get_logger(__name__)

lowerCamelCase_ = '''\
@inproceedings{rei-EtAl:2020:WMT,
    author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
    title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
    booktitle = {Proceedings of the Fifth Conference on Machine Translation},
    month = {November},
    year = {2020},
    address = {Online},
    publisher = {Association for Computational Linguistics},
    pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
'''

lowerCamelCase_ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''

lowerCamelCase_ = '''
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric(\'comet\')
    >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """simple docstring"""

    def lowerCAmelCase ( self : int ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            homepage="https://unbabel.github.io/COMET/html/index.html" ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string" , id="sequence" ),
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) ,
            codebase_urls=["https://github.com/Unbabel/COMET"] ,
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ] , )

    def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ):
        '''simple docstring'''
        if self.config_name == "default":
            _A = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
        else:
            _A = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def lowerCAmelCase ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=False ):
        '''simple docstring'''
        if gpus is None:
            _A = 1 if torch.cuda.is_available() else 0
        _A = {"src": sources, "mt": predictions, "ref": references}
        _A = [dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) for t in zip(*data.values() )]
        _A , _A = self.scorer.predict(__UpperCAmelCase , gpus=__UpperCAmelCase , progress_bar=__UpperCAmelCase )
        return {"mean_score": mean_score, "scores": scores}
style_context_codestyle: 79
label: 1
code:
'''simple docstring'''
import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class _UpperCAmelCase :
    """simple docstring"""

    def __init__( self : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : int=30 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Any=32 , __UpperCAmelCase : int=5 , __UpperCAmelCase : int=4 , __UpperCAmelCase : List[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : Any=10 , __UpperCAmelCase : Dict=0.02 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Union[str, Any]=0.6 , __UpperCAmelCase : Any=None , ):
        '''simple docstring'''
        _A = parent
        _A = batch_size
        _A = image_size
        _A = patch_size
        _A = num_channels
        _A = is_training
        _A = use_labels
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = intermediate_size
        _A = hidden_act
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = type_sequence_label_size
        _A = initializer_range
        _A = mask_ratio
        _A = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        _A = (image_size // patch_size) ** 2
        _A = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        _A = None
        if self.use_labels:
            _A = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        _A = self.get_config()

        return config, pixel_values, labels

    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=__UpperCAmelCase ,
            initializer_range=self.initializer_range ,
            mask_ratio=self.mask_ratio ,
        )

    def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ):
        '''simple docstring'''
        _A = ViTMAEModel(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        _A = model(__UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] ):
        '''simple docstring'''
        _A = ViTMAEForPreTraining(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        _A = model(__UpperCAmelCase )
        _A = (self.image_size // self.patch_size) ** 2
        _A = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

        # test greyscale images
        _A = 1
        _A = ViTMAEForPreTraining(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _A = model(__UpperCAmelCase )
        _A = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        _A = self.prepare_config_and_inputs()
        _A , _A , _A = config_and_inputs
        _A = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
    """simple docstring"""

    snake_case = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    snake_case = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
    snake_case = False
    snake_case = False
    snake_case = False
    snake_case = False

    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        _A = ViTMAEModelTester(self )
        _A = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )

    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        pass

    def lowerCAmelCase ( self : int ):
        '''simple docstring'''
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _A = model_class(__UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            _A = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )

    def lowerCAmelCase ( self : str ):
        '''simple docstring'''
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _A = model_class(__UpperCAmelCase )
            _A = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _A = [*signature.parameters.keys()]

            _A = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __UpperCAmelCase )

    def lowerCAmelCase ( self : int ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )

    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase )

    def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] ):
        '''simple docstring'''
        np.random.seed(2 )

        _A = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        _A = torch.from_numpy(__UpperCAmelCase )

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        _A = pt_noise

        super().check_pt_tf_models(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        _A , _A = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _A = model_class(__UpperCAmelCase )
            model.to(__UpperCAmelCase )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                _A = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )

            _A = outputs[0].cpu().numpy()
            _A = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__UpperCAmelCase )
                _A = model_class.from_pretrained(__UpperCAmelCase )
                model.to(__UpperCAmelCase )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    _A = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )

                # Make sure we don't have nans
                _A = after_outputs[0].cpu().numpy()
                _A = 0
                _A = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(__UpperCAmelCase , 1E-5 )

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def lowerCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        pass

    @slow
    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _A = ViTMAEModel.from_pretrained(__UpperCAmelCase )
            self.assertIsNotNone(__UpperCAmelCase )


def __lowercase ( ) -> Optional[int]:
    '''simple docstring'''
    _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None

    @slow
    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        np.random.seed(2 )

        _A = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__UpperCAmelCase )

        _A = self.default_image_processor
        _A = prepare_img()
        _A = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        _A = ViTMAEConfig()
        _A = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        _A = np.random.uniform(size=(1, num_patches) )

        # forward pass
        with torch.no_grad():
            _A = model(**__UpperCAmelCase , noise=torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase ) )

        # verify the logits
        _A = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , __UpperCAmelCase )

        _A = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__UpperCAmelCase ) , atol=1E-4 ) )
code_codestyle: 79
style_context:
'''simple docstring'''
from __future__ import annotations


def __lowercase ( __lowercase , __lowercase = None , __lowercase = None ) -> None:
    '''simple docstring'''
    if start is None:
        _A = 0

    if end is None:
        _A = len(__lowercase ) - 1

    if start >= end:
        return

    _A = (start + end) // 2

    slowsort(__lowercase , __lowercase , __lowercase )
    slowsort(__lowercase , mid + 1 , __lowercase )

    if sequence[end] < sequence[mid]:
        _A , _A = sequence[mid], sequence[end]

    slowsort(__lowercase , __lowercase , end - 1 )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
style_context_codestyle: 79
label: 1
code:
'''simple docstring'''
def __lowercase ( __lowercase ) -> str:
    '''simple docstring'''
    return "".join(chr(ord(__lowercase ) - 32 ) if "a" <= char <= "z" else char for char in word )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
code_codestyle: 79
style_context:
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class _UpperCAmelCase :
    """simple docstring"""

    snake_case = PegasusConfig
    snake_case = {}
    snake_case = '''gelu'''

    def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ):
        '''simple docstring'''
        _A = parent
        _A = batch_size
        _A = seq_length
        _A = is_training
        _A = use_labels
        _A = vocab_size
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = intermediate_size
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = max_position_embeddings
        _A = eos_token_id
        _A = pad_token_id
        _A = bos_token_id

    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        _A = tf.concat([input_ids, eos_tensor] , axis=1 )
        _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _A = self.config_cls(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_ids=[2] ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            decoder_start_token_id=self.pad_token_id ,
            **self.config_updates ,
        )
        _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        return config, inputs_dict

    def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ):
        '''simple docstring'''
        _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder()
        _A = inputs_dict["input_ids"]

        _A = input_ids[:1, :]
        _A = inputs_dict["attention_mask"][:1, :]
        _A = inputs_dict["head_mask"]
        _A = 1

        # first forward pass
        _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase )

        _A , _A = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        _A = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        _A = tf.concat([input_ids, next_tokens] , axis=-1 )
        _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
        _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        _A = output_from_no_past[:, -3:, random_slice_idx]
        _A = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 )


def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]:
    '''simple docstring'''
    if attention_mask is None:
        _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        _A = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
    """simple docstring"""

    snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    snake_case = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    snake_case = True
    snake_case = False
    snake_case = False

    def lowerCAmelCase ( self : str ):
        '''simple docstring'''
        _A = TFPegasusModelTester(self )
        _A = ConfigTester(self , config_class=__UpperCAmelCase )

    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def lowerCAmelCase ( self : Tuple ):
        '''simple docstring'''
        _A = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase )


@require_sentencepiece
@require_tokenizers
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    snake_case = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    snake_case = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]
    # differs slightly from pytorch, likely due to numerical differences in linear layers
    snake_case = '''google/pegasus-xsum'''

    @cached_property
    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def lowerCAmelCase ( self : Dict ):
        '''simple docstring'''
        _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ):
        '''simple docstring'''
        _A = self.translate_src_text(**__UpperCAmelCase )
        assert self.expected_text == generated_words

    def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" )
        _A = self.model.generate(
            model_inputs.input_ids ,
            attention_mask=model_inputs.attention_mask ,
            num_beams=2 ,
            use_cache=__UpperCAmelCase ,
        )
        _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase )
        return generated_words

    @slow
    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        self._assert_generated_batch_equal_expected()
style_context_codestyle: 79
label: 1
code:
'''simple docstring'''
def __lowercase ( __lowercase ) -> list[int]:
    '''simple docstring'''
    if length <= 0 or not isinstance(__lowercase , __lowercase ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(__lowercase )]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
code_codestyle: 79
from __future__ import annotations

import unittest

from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)


@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
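# A minimal NumPy sketch of the rotary trick exercised by the test above
# (hypothetical helper, not the library implementation; the exact pairing
# and sin/cos table layout inside TFRoFormerSelfAttention may differ):
# each even/odd feature pair is rotated by a position-dependent angle, so
# query-key dot products end up depending only on relative positions.
import numpy as np


def rotate_pairs(x, sin, cos):
    # x: (..., seq_len, dim) with even dim; sin/cos: (seq_len, dim // 2)
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = x_even * cos - x_odd * sin
    out[..., 1::2] = x_even * sin + x_odd * cos
    return out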
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
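# A short usage sketch for the config above (assumes a transformers install
# that exposes GPTNeoXConfig; the values here are illustrative only):
from transformers import GPTNeoXConfig

small_config = GPTNeoXConfig(hidden_size=512, num_attention_heads=8, num_hidden_layers=4)
# `rope_scaling`, when given, must be a dict with exactly the keys
# `type` ("linear" or "dynamic") and `factor` (a float > 1.0):
scaled_config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})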
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Jaccard similarity |A ∩ B| / |A ∪ B|; with `alternative_union`,
    the denominator is |A| + |B| instead."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image; positive `level` increases it."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
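# Worked check of the factor formula for level = 170 (pure arithmetic):
# factor = 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85,
# so mid-gray (128) is a fixed point while values away from 128 are pushed
# roughly 4.85x further from it.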
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the Easter date for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence (2, 3, 7, 43, ...)."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
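# Recursion depth grows linearly with `number`; an equivalent iterative
# form of the same recurrence s(n) = s(n-1)^2 - s(n-1) + 1 (a sketch, not
# part of the original) avoids hitting Python's recursion limit:
def sylvester_iterative(number: int) -> int:
    term = 2
    for _ in range(number - 1):
        term = term * term - term + 1
    return term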
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Sum the factorials of the digits of `number`."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    """Count starting numbers below `number_limit` whose non-repeating chain
    of digit-factorial sums has exactly `chain_length` terms (Project Euler 74)."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
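# Worked example of one chain (each assert is a fact about the function above):
# 69 -> 363600 -> 1454 -> 169 -> 363601 -> 1454 -> ... the loop closes at
# 1454, so the non-repeating chain starting at 69 has five distinct terms.
assert digit_factorial_sum(69) == 363600
assert digit_factorial_sum(363600) == 1454
assert digit_factorial_sum(1454) == 169
assert digit_factorial_sum(169) == 363601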
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores
    according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) based on the head importance scores,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (remove the head weights entirely) based on the head mask,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650), and
    compare loss and timing before and after.
    """
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
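# Hypothetical invocation of the script above (the script filename and data
# paths are placeholders; the data file must be readable by np.loadtxt as
# whitespace-separated int64 token ids):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt \
#       --output_dir ./pruned_gpt2 \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1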
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many dice rolls produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9

    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
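# Sanity check: the probability printed above should come out to 0.5731441,
# i.e. Peter's nine 4-sided dice beat Colin's six 6-sided dice about 57.3%
# of the time (Project Euler 205).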
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
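# Schematic sketch of what the `_LazyModule` indirection above buys (an
# illustration, not the real implementation): attribute access triggers the
# submodule import on demand, so `import transformers` itself stays cheap.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map e.g. "LongformerModel" -> "modeling_longformer"
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module(f"{self.__name__}.{self._class_to_module[attr]}")
        return getattr(module, attr)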
def is_pentagonal(n: int) -> bool:
    """True if `n` is pentagonal, i.e. n = m * (3m - 1) / 2 for a positive integer m."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Find the smallest difference D = P_j - P_i such that P_i + P_j and
    P_j - P_i are both pentagonal (Project Euler 44)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
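# Why the modulo test in `is_pentagonal` works: P(m) = m * (3m - 1) / 2, so
# solving 3m^2 - m - 2n = 0 for m gives m = (1 + sqrt(1 + 24n)) / 6; n is
# pentagonal exactly when that m is a positive integer, i.e. when
# ((1 + root) / 6) has no fractional part.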
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
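
# Added illustrative example (not part of the original file): a second quick demo of
# jaccard_similarity over lists with alternative_union=True; the expected value is
# worked out by hand (an intersection of 3 elements over a 5 + 6 = 11 element "union").
if __name__ == "__main__":
    list_a = ["a", "b", "c", "d", "e"]
    list_b = ["c", "d", "e", "f", "h", "i"]
    print(jaccard_similarity(list_a, list_b, alternative_union=True))  # 3 / 11 ≈ 0.2727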
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # `to_kwargs` only returns the fields that differ from their default values.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = OrderedDict( [ # Base model mapping ('''albert''', '''FlaxAlbertModel'''), ('''bart''', '''FlaxBartModel'''), ('''beit''', '''FlaxBeitModel'''), ('''bert''', '''FlaxBertModel'''), ('''big_bird''', '''FlaxBigBirdModel'''), ('''blenderbot''', '''FlaxBlenderbotModel'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''), ('''clip''', '''FlaxCLIPModel'''), ('''distilbert''', '''FlaxDistilBertModel'''), ('''electra''', '''FlaxElectraModel'''), ('''gpt-sw3''', '''FlaxGPT2Model'''), ('''gpt2''', '''FlaxGPT2Model'''), ('''gpt_neo''', '''FlaxGPTNeoModel'''), ('''gptj''', '''FlaxGPTJModel'''), ('''longt5''', '''FlaxLongT5Model'''), ('''marian''', '''FlaxMarianModel'''), ('''mbart''', '''FlaxMBartModel'''), ('''mt5''', '''FlaxMT5Model'''), ('''opt''', '''FlaxOPTModel'''), ('''pegasus''', '''FlaxPegasusModel'''), ('''regnet''', '''FlaxRegNetModel'''), ('''resnet''', '''FlaxResNetModel'''), ('''roberta''', '''FlaxRobertaModel'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''), ('''roformer''', '''FlaxRoFormerModel'''), ('''t5''', '''FlaxT5Model'''), ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''), ('''vit''', '''FlaxViTModel'''), ('''wav2vec2''', '''FlaxWav2Vec2Model'''), ('''whisper''', '''FlaxWhisperModel'''), ('''xglm''', '''FlaxXGLMModel'''), ('''xlm-roberta''', '''FlaxXLMRobertaModel'''), ] ) lowerCamelCase_ = OrderedDict( [ # Model for pre-training mapping ('''albert''', '''FlaxAlbertForPreTraining'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForPreTraining'''), ('''big_bird''', '''FlaxBigBirdForPreTraining'''), ('''electra''', '''FlaxElectraForPreTraining'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) lowerCamelCase_ = OrderedDict( [ # Model for Masked LM mapping ('''albert''', '''FlaxAlbertForMaskedLM'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForMaskedLM'''), ('''big_bird''', '''FlaxBigBirdForMaskedLM'''), ('''distilbert''', '''FlaxDistilBertForMaskedLM'''), ('''electra''', '''FlaxElectraForMaskedLM'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) lowerCamelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''), ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''marian''', 
'''FlaxMarianMTModel'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ] ) lowerCamelCase_ = OrderedDict( [ # Model for Image-classsification ('''beit''', '''FlaxBeitForImageClassification'''), ('''regnet''', '''FlaxRegNetForImageClassification'''), ('''resnet''', '''FlaxResNetForImageClassification'''), ('''vit''', '''FlaxViTForImageClassification'''), ] ) lowerCamelCase_ = OrderedDict( [ ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''), ] ) lowerCamelCase_ = OrderedDict( [ # Model for Causal LM mapping ('''bart''', '''FlaxBartForCausalLM'''), ('''bert''', '''FlaxBertForCausalLM'''), ('''big_bird''', '''FlaxBigBirdForCausalLM'''), ('''electra''', '''FlaxElectraForCausalLM'''), ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''), ('''gpt2''', '''FlaxGPT2LMHeadModel'''), ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''), ('''gptj''', '''FlaxGPTJForCausalLM'''), ('''opt''', '''FlaxOPTForCausalLM'''), ('''roberta''', '''FlaxRobertaForCausalLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''), ('''xglm''', '''FlaxXGLMForCausalLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''), ] ) lowerCamelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ('''albert''', '''FlaxAlbertForSequenceClassification'''), ('''bart''', '''FlaxBartForSequenceClassification'''), ('''bert''', '''FlaxBertForSequenceClassification'''), ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''), ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''), ('''electra''', '''FlaxElectraForSequenceClassification'''), ('''mbart''', '''FlaxMBartForSequenceClassification'''), ('''roberta''', '''FlaxRobertaForSequenceClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''), ('''roformer''', '''FlaxRoFormerForSequenceClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''), ] ) lowerCamelCase_ = OrderedDict( [ # Model for Question Answering mapping ('''albert''', '''FlaxAlbertForQuestionAnswering'''), ('''bart''', '''FlaxBartForQuestionAnswering'''), ('''bert''', '''FlaxBertForQuestionAnswering'''), ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''), ('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''), ('''electra''', '''FlaxElectraForQuestionAnswering'''), ('''mbart''', '''FlaxMBartForQuestionAnswering'''), ('''roberta''', '''FlaxRobertaForQuestionAnswering'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''), ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''), ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''), ] ) lowerCamelCase_ = OrderedDict( [ # Model for Token Classification mapping ('''albert''', '''FlaxAlbertForTokenClassification'''), ('''bert''', '''FlaxBertForTokenClassification'''), ('''big_bird''', '''FlaxBigBirdForTokenClassification'''), ('''distilbert''', '''FlaxDistilBertForTokenClassification'''), ('''electra''', '''FlaxElectraForTokenClassification'''), ('''roberta''', '''FlaxRobertaForTokenClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''), ('''roformer''', '''FlaxRoFormerForTokenClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''), ] ) lowerCamelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ('''albert''', '''FlaxAlbertForMultipleChoice'''), 
('''bert''', '''FlaxBertForMultipleChoice'''), ('''big_bird''', '''FlaxBigBirdForMultipleChoice'''), ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''), ('''electra''', '''FlaxElectraForMultipleChoice'''), ('''roberta''', '''FlaxRobertaForMultipleChoice'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''), ('''roformer''', '''FlaxRoFormerForMultipleChoice'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''), ] ) lowerCamelCase_ = OrderedDict( [ ('''bert''', '''FlaxBertForNextSentencePrediction'''), ] ) lowerCamelCase_ = OrderedDict( [ ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ] ) lowerCamelCase_ = OrderedDict( [ ('''whisper''', '''FlaxWhisperForAudioClassification'''), ] ) lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowerCamelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowerCamelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowerCamelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowerCamelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowerCamelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowerCamelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowerCamelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_MAPPING lowerCamelCase_ = auto_class_update(FlaxAutoModel) class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowerCamelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''') class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowerCamelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''') class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowerCamelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''') class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCamelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base''' ) class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCamelCase_ = auto_class_update( 
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification''' ) class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING lowerCamelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''') class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='''token classification''' ) class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowerCamelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''') class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowerCamelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction''' ) class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc='''image classification''' ) class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowerCamelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''') class _UpperCAmelCase ( _BaseAutoModelClass ): """simple docstring""" snake_case = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowerCamelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling''' )
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
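
# Added illustrative check (not part of the original file): for n = 10 the difference
# is (1 + ... + 10)^2 - (1^2 + ... + 10^2) = 3025 - 385 = 2640.
if __name__ == "__main__":
    assert solution(10) == 2640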
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
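
# Added illustrative sketch (not part of the original file): how the helpers above are
# typically combined. The URL and checksum values below are placeholders; in practice
# `recorded` is built with get_size_checksum_dict on the downloaded files and
# `expected` comes from the dataset's metadata.
if __name__ == "__main__":
    expected = {"https://example.com/data.csv": {"num_bytes": 10, "checksum": "abc"}}
    recorded = {"https://example.com/data.csv": {"num_bytes": 10, "checksum": "abc"}}
    verify_checksums(expected, recorded)  # matching dicts: logs success, raises nothing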
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , 
train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
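
# Added illustrative example (not part of the original file): slowsort sorts in place
# and returns None; it is a deliberately inefficient "multiply and surrender" algorithm.
if __name__ == "__main__":
    data = [5, 3, 8, 1, 9, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 5, 8, 9]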
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
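
# Added illustrative usage (not part of the original file; assumes the `transformers`
# package is installed so the top-level import resolves - the relative imports above
# prevent running this module directly).
if __name__ == "__main__":
    from transformers import CanineConfig as _CanineConfig

    config = _CanineConfig(downsampling_rate=2)
    print(config.downsampling_rate, config.num_hash_buckets)  # 2 16384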
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
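
# Added illustrative example (not part of the original file): one mole of an ideal gas
# at 300 K in 1 m^3 gives P = nRT / V = 8.314462 * 300 ≈ 2494.34 Pa.
if __name__ == "__main__":
    print(pressure_of_gas_system(1.0, 300.0, 1.0))  # ≈ 2494.3386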
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
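
# Added illustrative example (not part of the original file): get_sum answers range
# queries in O(1) after the O(n) prefix pass; contains_sum checks whether any
# contiguous subarray sums to the target.
if __name__ == "__main__":
    ps = PrefixSum([1, 2, 3])
    print(ps.get_sum(0, 2))    # 6
    print(ps.get_sum(1, 2))    # 5
    print(ps.contains_sum(5))  # True (2 + 3)
    print(ps.contains_sum(7))  # False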
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size must get the same shuffling, so that entangled
    # lists (e.g. shards and shard metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
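
# Added illustrative example (not part of the original file): 10 shards over 3 jobs
# yields contiguous ranges of sizes 4, 3, 3, and _split_gen_kwargs only splits
# list-valued kwargs (non-list values are shared across jobs).
if __name__ == "__main__":
    print(_distribute_shards(num_shards=10, max_num_jobs=3))
    # [range(0, 4), range(4, 7), range(7, 10)]
    gen_kwargs = {"files": [f"shard_{i}.txt" for i in range(4)], "mode": "train"}
    print(_split_gen_kwargs(gen_kwargs, max_num_jobs=2))
    # [{'files': ['shard_0.txt', 'shard_1.txt'], 'mode': 'train'},
    #  {'files': ['shard_2.txt', 'shard_3.txt'], 'mode': 'train'}]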
'''simple docstring''' import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCAmelCase , "tf_padding" ) ) self.parent.assertTrue(hasattr(__UpperCAmelCase , "depth_multiplier" ) ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : List[Any]=0.25 , __UpperCAmelCase : Union[str, Any]=8 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Any=1024 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Union[str, Any]="relu6" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : Dict=True , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : Union[str, Any]=10 , __UpperCAmelCase : Tuple=None , ): '''simple docstring''' _A = parent _A = batch_size _A = num_channels _A = image_size _A = depth_multiplier _A = min_depth _A = tf_padding _A = int(last_hidden_size * depth_multiplier ) _A = output_stride _A = hidden_act _A = classifier_dropout_prob _A = use_labels _A = is_training _A = num_labels _A = initializer_range _A = scope def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.num_labels ) _A = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _A = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowerCAmelCase ( self : str , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int ): '''simple docstring''' _A = MobileNetVaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _A = model(__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Dict ): '''simple docstring''' _A = self.num_labels _A = 
MobileNetVaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _A = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.prepare_config_and_inputs() _A , _A , _A , _A = config_and_inputs _A = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () snake_case = ( {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification} if is_torch_available() else {} ) snake_case = False snake_case = False snake_case = False snake_case = False def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = MobileNetVaModelTester(self ) _A = MobileNetVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV1 does not use inputs_embeds" ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason="MobileNetV1 does not support input and output embeddings" ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason="MobileNetV1 does not output attentions" ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(__UpperCAmelCase ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' def check_hidden_states_output(__UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Any ): _A = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): _A = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _A = outputs.hidden_states _A = 26 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _A = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = MobileNetVaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __lowercase ( ) -> Optional[Any]: '''simple 
docstring''' _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return ( MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(__UpperCAmelCase ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _A = model(**__UpperCAmelCase ) # verify the logits _A = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) _A = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''', '''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''', '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''', '''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''', '''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''', '''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''', '''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''', '''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''', '''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''', '''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''', '''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''', '''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''', } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''codegen''' snake_case = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : List[Any] , __UpperCAmelCase : List[str]=50400 , __UpperCAmelCase : Optional[int]=2048 , __UpperCAmelCase : str=2048 , __UpperCAmelCase : Any=4096 , __UpperCAmelCase : Union[str, Any]=28 , __UpperCAmelCase : Dict=16 , __UpperCAmelCase : Any=64 , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Tuple="gelu_new" , __UpperCAmelCase : List[str]=0.0 , __UpperCAmelCase : Any=0.0 , __UpperCAmelCase : List[str]=0.0 , __UpperCAmelCase : Optional[int]=1E-5 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : int=50256 , __UpperCAmelCase : Optional[int]=50256 , __UpperCAmelCase : Any=False , **__UpperCAmelCase : Dict , ): '''simple docstring''' _A = vocab_size _A = n_ctx _A = n_positions _A = n_embd _A = n_layer _A = n_head _A = n_inner _A = rotary_dim _A = activation_function _A = resid_pdrop _A = embd_pdrop _A = attn_pdrop _A = layer_norm_epsilon _A = initializer_range _A = use_cache _A = bos_token_id _A = eos_token_id super().__init__( bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : PretrainedConfig , __UpperCAmelCase : str = "default" , __UpperCAmelCase : List[PatchingSpec] = None , __UpperCAmelCase : bool = False , ): '''simple docstring''' super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , 
patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase ) if not getattr(self._config , "pad_token_id" , __UpperCAmelCase ): # TODO: how to do that better? _A = 0 @property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction="inputs" ) _A = {0: "batch", 1: "past_sequence + sequence"} else: _A = {0: "batch", 1: "sequence"} return common_inputs @property def lowerCAmelCase ( self : Any ): '''simple docstring''' return self._config.n_layer @property def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return self._config.n_head def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : PreTrainedTokenizer , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ): '''simple docstring''' _A = super(__UpperCAmelCase , self ).generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) # We need to order the input in the way they appears in the forward() _A = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _A , _A = common_inputs["input_ids"].shape # Not using the same length for past_key_values _A = seqlen + 2 _A = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _A = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers ) ] _A = common_inputs["attention_mask"] if self.use_past: _A = ordered_inputs["attention_mask"].dtype _A = torch.cat( [ordered_inputs["attention_mask"], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return 13
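# --- Usage sketch (added for illustration) --- The ONNX export config above
# builds dummy inputs in forward() order and, with `use_past=True`, adds
# zero-filled `past_key_values` tensors. The class and import names here are
# assumptions based on the usual `transformers.models.codegen` layout.
from transformers import AutoTokenizer, CodeGenConfig, TensorType
from transformers.models.codegen.configuration_codegen import CodeGenOnnxConfig  # assumed path

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
onnx_config = CodeGenOnnxConfig(CodeGenConfig(), task="default", use_past=True)

# Keys come back in the order forward() expects:
# input_ids, past_key_values, attention_mask.
dummy_inputs = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(list(dummy_inputs.keys()))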
'''simple docstring''' from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ): '''simple docstring''' super().__init__() _A = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase ) else: _A = None _A = torch.nn.Parameter(__UpperCAmelCase ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ): '''simple docstring''' _A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1 # get prompt text embeddings _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) _A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) _A = text_input_ids[:, : self.tokenizer.model_max_length] _A = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate text embeddings for each generation per prompt _A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _A = self.learned_classifier_free_sampling_embeddings.embeddings _A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 ) else: _A = [""] * batch_size _A = text_input_ids.shape[-1] _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , ) _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _A = negative_prompt_embeds.shape[1] _A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 ) _A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = 1 elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = len(__UpperCAmelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' ) _A = batch_size * num_images_per_prompt _A = guidance_scale > 1.0 _A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(__UpperCAmelCase )}.''' ) # get the initial completely masked latents unless the user supplied it _A = (batch_size, self.transformer.num_latent_pixels) if latents is None: _A = self.transformer.num_vector_embeds - 1 _A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0," f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) _A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device ) _A = self.scheduler.timesteps.to(self.device ) _A = latents for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ): # expand the sample if we are doing classifier free guidance _A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample if do_classifier_free_guidance: _A , _A = model_output.chunk(2 ) _A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase ) _A = self.truncate(__UpperCAmelCase , __UpperCAmelCase ) # remove `log(0)`'s (`-inf`s) _A = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = self.vqvae.config.vq_embed_dim _A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase ) _A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample _A = (image / 2 + 0.5).clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ): '''simple docstring''' _A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase ) _A = torch.exp(__UpperCAmelCase ) _A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase ) _A = torch.cat((all_true, keep_mask) , dim=1 ) _A = keep_mask[:, :-1, :] _A = keep_mask.gather(1 , indices.argsort(1 ) ) _A = log_p_x_0.clone() _A = -torch.inf # -inf = log(0) return rv
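# --- Usage sketch (added for illustration) --- End-to-end use of the
# VQ-Diffusion pipeline defined above; "microsoft/vq-diffusion-ithq" is the
# checkpoint the pipeline was released with, assumed here.
import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# `truncation_rate` keeps only the most probable latent codes at each step
# (the truncation trick implemented in the `truncate()` method above).
image = pipe(
    "teddy bear playing in the pool", num_inference_steps=100, truncation_rate=0.86
).images[0]
image.save("teddy_bear.png")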
'''simple docstring''' import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _A = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__UpperCAmelCase , cache_dir=__UpperCAmelCase ) _A = [t[-1] for t in os.walk(os.path.join(__UpperCAmelCase , os.listdir(__UpperCAmelCase )[0] , "snapshots" ) )] _A = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__UpperCAmelCase ) _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 4 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3 assert np.abs(np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 49947.875 ) < 5E-1 _A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(__UpperCAmelCase ) == num_samples def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=__UpperCAmelCase ) _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 50 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3 assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1 def lowerCAmelCase ( self : str ): 
'''simple docstring''' _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase ) _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 50 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3 assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1 def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa ) _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 50 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3 assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1 def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = FlaxDDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , ) _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , ) _A = scheduler.create_state() _A = scheduler_state _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = jax.random.PRNGKey(0 ) _A = 50 _A = jax.device_count() _A = num_samples * [prompt] _A = pipeline.prepare_inputs(__UpperCAmelCase ) # shard inputs and rng _A = replicate(__UpperCAmelCase ) _A = jax.random.split(__UpperCAmelCase , __UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3 assert np.abs((np.abs(__UpperCAmelCase , dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1 def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) _A = 
jax.device_count() _A = num_samples * [prompt] _A = jax.random.split(jax.random.PRNGKey(0 ) , __UpperCAmelCase ) _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , ) _A = replicate(__UpperCAmelCase ) _A = pipeline.prepare_inputs(__UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images.shape == (num_samples, 1, 512, 512, 3) _A = images[2, 0, 256, 10:17, 1] # With memory efficient attention _A , _A = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__UpperCAmelCase , use_memory_efficient_attention=__UpperCAmelCase , ) _A = replicate(__UpperCAmelCase ) _A = pipeline.prepare_inputs(__UpperCAmelCase ) _A = shard(__UpperCAmelCase ) _A = pipeline(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , jit=__UpperCAmelCase ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) _A = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
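# --- Usage sketch (added for illustration) --- The replicate/shard pattern the
# tests above rely on, in isolation: the weights are copied to every device and
# the tokenized prompt batch gains a leading device axis before the jitted call.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
)

prompts = ["a photo of an astronaut riding a horse"] * jax.device_count()
prompt_ids = pipeline.prepare_inputs(prompts)

params = replicate(params)                                   # one copy of the weights per device
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
prompt_ids = shard(prompt_ids)                               # [devices, batch/devices, ...]

images = pipeline(prompt_ids, params, rng, num_inference_steps=50, jit=True).images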
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> int: '''simple docstring''' _A = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _A = "" else: _A = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[ : config.hidden_size, : ] _A = in_proj_bias[: config.hidden_size] _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = in_proj_bias[-config.hidden_size :] def __lowercase ( __lowercase ) -> List[str]: '''simple docstring''' _A = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple: '''simple docstring''' _A = dct.pop(__lowercase ) _A = val def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' _A = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=__lowercase , ) _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 ) _A = False # load original model from timm _A = timm.create_model(__lowercase , pretrained=__lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _A = timm_model.state_dict() if base_model: remove_classification_head_(__lowercase ) _A = create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase , __lowercase ) _A = "huggingface/label-files" _A = "imagenet-1k-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _A = ViTHybridModel(__lowercase ).eval() else: _A = ViTHybridForImageClassification(__lowercase ).eval() model.load_state_dict(__lowercase ) # create image processor _A = create_transform(**resolve_data_config({} , model=__lowercase ) ) _A = transform.transforms _A = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _A = ViTHybridImageProcessor( do_resize=__lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _A = prepare_img() _A = transform(__lowercase ).unsqueeze(0 ) _A = processor(__lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__lowercase , __lowercase ) # verify logits with torch.no_grad(): _A = model(__lowercase ) _A = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _A = timm_model.forward_features(__lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowercase , outputs.pooler_output , 
atol=1e-3 ) else: _A = timm_model(__lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__lowercase ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
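# --- Invocation sketch (added for illustration) --- The converter above is a
# standalone script; a typical run with the default timm checkpoint looks like
# (script filename assumed from the transformers repository layout):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384
#
# or, equivalently, calling the conversion function directly with a placeholder
# output path:
#
#   convert_vit_checkpoint("vit_base_r50_s16_384", "./vit-hybrid-base-bit-384", False)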
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right lowerCamelCase_ = 25_60_47 lowerCamelCase_ = 25_61_45 @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = NllbTokenizer snake_case = NllbTokenizerFast snake_case = True snake_case = True snake_case = {} def lowerCAmelCase ( self : str ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _A = NllbTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = NllbTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) _A = tokenizer.tokenize("This is a test" ) self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _A = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) _A = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _A = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def lowerCAmelCase ( self : int ): '''simple docstring''' _A = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) _A = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) _A = tempfile.mkdtemp() _A = tokenizer_r.save_pretrained(__UpperCAmelCase ) _A = tokenizer_p.save_pretrained(__UpperCAmelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) _A = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase ) # Checks everything loads correctly in the same way _A = tokenizer_r.from_pretrained(__UpperCAmelCase ) _A = tokenizer_p.from_pretrained(__UpperCAmelCase ) # Check 
special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) ) shutil.rmtree(__UpperCAmelCase ) # Save tokenizer rust, legacy_format=True _A = tempfile.mkdtemp() _A = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase ) _A = tokenizer_p.save_pretrained(__UpperCAmelCase ) # Checks it save with the same files self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase ) # Checks everything loads correctly in the same way _A = tokenizer_r.from_pretrained(__UpperCAmelCase ) _A = tokenizer_p.from_pretrained(__UpperCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) ) shutil.rmtree(__UpperCAmelCase ) # Save tokenizer rust, legacy_format=False _A = tempfile.mkdtemp() _A = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase ) _A = tokenizer_p.save_pretrained(__UpperCAmelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _A = tokenizer_r.from_pretrained(__UpperCAmelCase ) _A = tokenizer_p.from_pretrained(__UpperCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase ) ) shutil.rmtree(__UpperCAmelCase ) @require_torch def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if not self.test_seqaseq: return _A = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. _A = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] _A = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi" " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: _A = tokenizer.prepare_seqaseq_batch( src_texts=__UpperCAmelCase , tgt_texts=__UpperCAmelCase , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified _A = tokenizer.prepare_seqaseq_batch( __UpperCAmelCase , tgt_texts=__UpperCAmelCase , max_length=3 , return_tensors="pt" ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) _A = tokenizer.prepare_seqaseq_batch( src_texts=__UpperCAmelCase , max_length=3 , max_target_length=10 , return_tensors="pt" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn("decoder_input_ids" , __UpperCAmelCase ) @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." 
) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _A = [AddedToken("<special>" , lstrip=__UpperCAmelCase )] _A = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase ) _A = tokenizer_r.encode("Hey this is a <special> token" ) _A = tokenizer_r.encode("<special>" , add_special_tokens=__UpperCAmelCase )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: _A = self.rust_tokenizer_class.from_pretrained( __UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) _A = self.tokenizer_class.from_pretrained( __UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase ) _A = tokenizer_p.encode("Hey this is a <special> token" ) _A = tokenizer_cr.encode("Hey this is a <special> token" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = '''facebook/nllb-200-distilled-600M''' snake_case = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] snake_case = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] snake_case = [ 25_60_47, 1_62_97, 13_44_08, 81_65, 24_80_66, 1_47_34, 9_50, 11_35, 10_57_21, 35_73, 83, 2_73_52, 1_08, 4_94_86, 2, ] @classmethod def lowerCAmelCase ( cls : Optional[int] ): '''simple docstring''' _A = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" ) _A = 1 return cls def lowerCAmelCase ( self : List[str] ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 256001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 256002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 256057 ) def lowerCAmelCase ( self : int ): '''simple docstring''' _A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids ) # fmt: off _A = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: on _A = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _A = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase ) 
def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , __UpperCAmelCase ) _A = 10 _A = self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , __UpperCAmelCase ) self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [256203, 3] ) def lowerCAmelCase ( self : int ): '''simple docstring''' _A = tempfile.mkdtemp() _A = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__UpperCAmelCase ) _A = NllbTokenizer.from_pretrained(__UpperCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase ) @require_torch def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) _A = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) _A = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors="pt" ) _A = self.tokenizer( text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=10 , return_tensors="pt" ) _A = targets["input_ids"] _A = shift_tokens_right( __UpperCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , { # A, test, EOS, en_XX "input_ids": [[256047, 70, 7356, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 256057, } , ) @require_torch def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = True _A = self.tokenizer( "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] ) _A = False _A = self.tokenizer( "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
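# --- Usage sketch (added for illustration) --- Translation with the language
# code conventions exercised above: the tokenizer inserts the source language
# code as a special token, and the target language code is forced as the first
# generated token.
from transformers import AutoModelForSeq2SeqLM, NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
generated = model.generate(
    **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ron_Latn"], max_length=40
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])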
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
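# --- Usage sketch (added for illustration) --- Instantiating the model exposed
# above from a small configuration; the field values below are placeholders.
from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerForPrediction

config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48)
model = TimeSeriesTransformerForPrediction(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")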
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168  # reference value this regression test checks against
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets lowerCamelCase_ = datasets.logging.get_logger(__name__) lowerCamelCase_ = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' lowerCamelCase_ = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' lowerCamelCase_ = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "sources": datasets.Value("string" , id="sequence" ), "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[ "https://github.com/Unbabel/COMET", "https://www.aclweb.org/anthology/2020.emnlp-main.213/", "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6", ] , ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ): '''simple docstring''' if self.config_name == "default": _A = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) ) else: _A = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=False ): '''simple docstring''' if gpus is None: _A = 1 if torch.cuda.is_available() else 0 _A = {"src": sources, "mt": predictions, "ref": references} _A = [dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) for t in zip(*data.values() )] _A , _A = self.scorer.predict(__UpperCAmelCase , gpus=__UpperCAmelCase , progress_bar=__UpperCAmelCase ) return {"mean_score": mean_score, "scores": scores}
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"

    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
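# --- Usage sketch (added for illustration) --- The feature extractor turns raw
# HTML into the node texts and XPath expressions checked in the test above.
from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(
    "<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>"
)
print(encoding.nodes)   # [['My First Heading', 'My first paragraph.']]
print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]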
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts `sequence[start..end]` (inclusive) in place.

    Slowsort is a deliberately inefficient "multiply and surrender" sorting
    algorithm: it recursively sorts both halves, swaps the maximum of the
    range to the end, and then recurses on everything but that maximum.
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
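# --- Usage sketch (added for illustration) --- slowsort mutates the list in
# place; expect it to be dramatically slower than built-in sorting even on
# small inputs, which is the point of the algorithm.
data = [5, 3, 8, 1, 9, 2]
slowsort(data)
print(data)  # [1, 2, 3, 5, 8, 9]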
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Checks whether `input_string` fully matches `pattern`, where "." matches
    any single character and "*" matches zero or more of the preceding element,
    using bottom-up dynamic programming.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
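# --- Additional examples (added for illustration) exercising the "." wildcard
# and the "*" quantifier handled by the DP table above.
assert match_pattern("aab", "c*a*b")            # "c*" matches empty, "a*" matches "aa"
assert not match_pattern("mississippi", "mis*is*p*.")
assert match_pattern("ab", ".*")                # ".*" matches any string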
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. 
I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
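# Usage sketch (not part of the test suite above): the integration test pins
# google/pegasus-xsum with beam size 2; this is roughly what a standalone run of
# the same generation path looks like. TFAutoModelForSeq2SeqLM is the real
# library spelling of the mangled TFAutoModelForSeqaSeqLM used above.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")

batch = tokenizer(
    ["PG&E stated it scheduled the blackouts in response to forecasts for high winds."],
    padding=True,
    return_tensors="tf",
)
summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))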
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is kept out of ClassVar so it survives asdict() serialization
    task: str = field(
        default="question-answering-extractive",
        metadata={"include_in_asdict_even_if_is_default": True},
    )
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
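# A hedged sketch of how the template above is consumed, in versions of
# `datasets` that still ship task templates: `column_mapping` tells
# Dataset.prepare_for_task how to rename columns onto the canonical
# question/context/answers schema. The dataset name is illustrative.
from datasets import load_dataset

squad = load_dataset("squad", split="train[:16]")
qa = squad.prepare_for_task("question-answering-extractive")
print(qa.column_names)  # ['question', 'context', 'answers']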
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ): '''simple docstring''' _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = "gelu" _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = TFRoFormerModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = True _A = TFRoFormerForCausalLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = self.num_choices _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple 
docstring''' _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) 
self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(__UpperCAmelCase )[0] # TODO Replace vocab size _A = 50000 _A = [1, 6, vocab_size] self.assertEqual(output.shape , __UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. _A = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = tf.constant([[4, 10]] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _A = emba(input_ids.shape ) _A = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) _A = emba.weight[:3, :5] tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : str ): '''simple docstring''' _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _A = embed_positions([2, 16, 768] )[None, None, :, :] _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _A = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
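# Sketch of the rotary trick the final test above checks, restated compactly:
# RoFormer rotates adjacent feature pairs (x0, x1) -> (x0*cos - x1*sin,
# x1*cos + x0*sin) with position-dependent angles from the sinusoidal table.
# This is an illustrative re-implementation, not the library's exact method.
import tensorflow as tf

def apply_rotary(x, sinusoidal_pos):
    # x: (batch, heads, seq_len, dim); sinusoidal_pos: (seq_len, dim),
    # first half sin values, second half cos values (as the embedding test shows)
    sin, cos = tf.split(sinusoidal_pos, 2, axis=-1)
    sin_pos = tf.repeat(sin, 2, axis=-1)  # interleave-expand to (seq_len, dim)
    cos_pos = tf.repeat(cos, 2, axis=-1)
    rotated = tf.stack([-x[..., 1::2], x[..., ::2]], axis=-1)
    rotated = tf.reshape(rotated, tf.shape(x))  # (-x1, x0, -x3, x2, ...)
    return x * cos_pos + rotated * sin_pos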
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''andreasmadsen/efficient_mlm_m0.40''': ( '''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json''' ), } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''roberta-prelayernorm''' def __init__( self : Union[str, Any] , __UpperCAmelCase : List[str]=50265 , __UpperCAmelCase : int=768 , __UpperCAmelCase : List[Any]=12 , __UpperCAmelCase : Tuple=12 , __UpperCAmelCase : str=3072 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=512 , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : List[Any]=1E-12 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : str=0 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : List[str]="absolute" , __UpperCAmelCase : Any=True , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout class _UpperCAmelCase ( snake_case_ ): """simple docstring""" @property def lowerCAmelCase ( self : str ): '''simple docstring''' if self.task == "multiple-choice": _A = {0: "batch", 1: "choice", 2: "sequence"} else: _A = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
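# Quick sanity sketch: instantiating the config above with its defaults.
# RobertaPreLayerNormConfig is assumed to be the unmangled class name this
# file defines in the real library; the ONNX subclass only adds the dynamic
# batch/sequence axes shown.
from transformers import RobertaPreLayerNormConfig

config = RobertaPreLayerNormConfig()
print(config.model_type, config.hidden_size, config.num_hidden_layers)
# roberta-prelayernorm 768 12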
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''gpt_neox''' def __init__( self : List[Any] , __UpperCAmelCase : List[Any]=50432 , __UpperCAmelCase : Any=6144 , __UpperCAmelCase : List[str]=44 , __UpperCAmelCase : List[Any]=64 , __UpperCAmelCase : List[str]=24576 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : Tuple=0.25 , __UpperCAmelCase : Optional[Any]=10000 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Tuple=2048 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Union[str, Any]=1E-5 , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : str=True , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : Tuple , ): '''simple docstring''' super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _A = vocab_size _A = max_position_embeddings _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = rotary_pct _A = rotary_emb_base _A = attention_dropout _A = hidden_dropout _A = classifier_dropout _A = initializer_range _A = layer_norm_eps _A = use_cache _A = tie_word_embeddings _A = use_parallel_residual _A = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( "The hidden size is not divisble by the number of attention heads! Make sure to update them!" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f'''got {self.rope_scaling}''' ) _A = self.rope_scaling.get("type" , __UpperCAmelCase ) _A = self.rope_scaling.get("factor" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
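# Sketch of a `rope_scaling` value the validator above accepts: a dict with
# exactly two entries whose "type" is "linear" or "dynamic" and whose "factor"
# is a float > 1. (The snippet's error message mentions `name`, but the lookup
# itself reads the "type" key.)
from transformers import GPTNeoXConfig

ok = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})
except ValueError as err:
    print(err)  # the factor field must be a float > 1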
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''microsoft/swinv2-tiny-patch4-window8-256''': ( '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json''' ), } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''swinv2''' snake_case = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Optional[int] , __UpperCAmelCase : str=224 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Union[str, Any]=3 , __UpperCAmelCase : Union[str, Any]=96 , __UpperCAmelCase : int=[2, 2, 6, 2] , __UpperCAmelCase : List[Any]=[3, 6, 12, 24] , __UpperCAmelCase : Any=7 , __UpperCAmelCase : Dict=4.0 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : str=False , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : str=1E-5 , __UpperCAmelCase : List[str]=32 , **__UpperCAmelCase : Optional[int] , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) _A = image_size _A = patch_size _A = num_channels _A = embed_dim _A = depths _A = len(__UpperCAmelCase ) _A = num_heads _A = window_size _A = mlp_ratio _A = qkv_bias _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = drop_path_rate _A = hidden_act _A = use_absolute_embeddings _A = layer_norm_eps _A = initializer_range _A = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _A = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) ) _A = (0, 0, 0, 0)
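# Worked check of the derived attribute at the end of the config above: with
# the default embed_dim=96 and four stages, the channel dimension after the
# last stage doubles once per stage transition.
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768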
'''simple docstring'''
from PIL import Image


def change_contrast(img: Image.Image, level: int) -> Image.Image:
    """Adjust the contrast of a PIL image; positive `level` increases contrast."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
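# Worked example of the contrast curve above at level=170: the slope around
# mid-gray (128) is about 4.85, so the mapping steepens contrast sharply; PIL's
# Image.point then clamps out-of-range results to 0..255 for 8-bit images.
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))
print(round(factor, 3))                 # 4.85
print(int(128 + factor * (200 - 128)))  # 477, clamped to 255 by Image.point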
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (0, 0) to the bottom-right cell, moving one
    step up/down/left/right at a time and never entering a cell marked 1."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
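# Usage sketch for the path counter above: count the simple paths from the
# top-left to the bottom-right corner of a small grid with a wall of 1s.
maze = [
    [0, 0, 0],
    [1, 1, 0],
    [0, 0, 0],
]
print(depth_first_search(maze, 0, 0, set()))  # 1: right, right, down, down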
'''simple docstring'''
def sylvester(number: int) -> int:
    """Return the nth number in Sylvester's sequence: 2, 3, 7, 43, 1807, ..."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
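# The recurrence the function above implements: a(1) = 2 and
# a(n) = a(n-1)^2 - a(n-1) + 1, written there as lower * upper + 1.
term = 2
terms = [term]
for _ in range(4):
    term = term * term - term + 1
    terms.append(term)
print(terms)  # [2, 3, 7, 43, 1807]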
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = VQModel snake_case = '''sample''' @property def lowerCAmelCase ( self : str , __UpperCAmelCase : Union[str, Any]=(32, 32) ): '''simple docstring''' _A = 4 _A = 3 _A = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCAmelCase ) return {"sample": image} @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return (3, 32, 32) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return (3, 32, 32) def lowerCAmelCase ( self : int ): '''simple docstring''' _A = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _A = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : str ): '''simple docstring''' pass def lowerCAmelCase ( self : Any ): '''simple docstring''' _A , _A = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__UpperCAmelCase ) _A = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = VQModel.from_pretrained("fusing/vqgan-dummy" ) model.to(__UpperCAmelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) _A = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) _A = image.to(__UpperCAmelCase ) with torch.no_grad(): _A = model(__UpperCAmelCase ).sample _A = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _A = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] ) # fmt: on self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) )
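# Standalone sketch of the dummy configuration the test above builds, run
# outside the test harness; shapes follow the test's 3x32x32 input.
import torch
from diffusers import VQModel

model = VQModel(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=3,
)
with torch.no_grad():
    out = model(torch.randn(4, 3, 32, 32)).sample
print(out.shape)  # torch.Size([4, 3, 32, 32])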
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCamelCase_ = logging.getLogger(__name__) def __lowercase ( __lowercase , __lowercase ) -> Optional[int]: '''simple docstring''' if os.path.exists(__lowercase ): if os.path.exists(os.path.join(__lowercase , "config.json" ) ) and os.path.isfile( os.path.join(__lowercase , "config.json" ) ): os.remove(os.path.join(__lowercase , "config.json" ) ) if os.path.exists(os.path.join(__lowercase , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(__lowercase , "pytorch_model.bin" ) ): os.remove(os.path.join(__lowercase , "pytorch_model.bin" ) ) else: os.makedirs(__lowercase ) model.save_pretrained(__lowercase ) def __lowercase ( __lowercase , __lowercase=False ) -> Optional[int]: '''simple docstring''' _A = 2 if unlogit: _A = torch.pow(__lowercase , __lowercase ) _A = p * torch.log(__lowercase ) _A = 0 return -plogp.sum(dim=-1 ) def __lowercase ( __lowercase ) -> Optional[Any]: '''simple docstring''' logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(__lowercase ) ) ) ) for row in range(len(__lowercase ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase=False ) -> int: '''simple docstring''' _A , _A = model.config.num_hidden_layers, model.config.num_attention_heads _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) if head_mask is None: _A = torch.ones(__lowercase , __lowercase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowercase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _A = None _A = 0.0 _A = 0.0 for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): _A = tuple(t.to(args.device ) for t in inputs ) ((_A) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _A = model(__lowercase , labels=__lowercase , head_mask=__lowercase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _A , _A , _A = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowercase ): _A = entropy(attn.detach() , __lowercase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _A = 2 _A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: _A = (head_importance - head_importance.min()) 
/ (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(__lowercase ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(__lowercase ) logger.info("Head ranked by importance scores" ) _A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _A = torch.arange( head_importance.numel() , device=args.device ) _A = head_ranks.view_as(__lowercase ) print_ad_tensor(__lowercase ) return attn_entropy, head_importance, total_loss def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase ) _A = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold ) _A = torch.ones_like(__lowercase ) _A = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _A = original_score while current_score >= original_score * args.masking_threshold: _A = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _A = float("Inf" ) _A = head_importance.view(-1 ).sort()[1] if len(__lowercase ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads _A = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) _A = new_head_mask.view(-1 ) _A = 0.0 _A = new_head_mask.view_as(__lowercase ) _A = new_head_mask.clone().detach() print_ad_tensor(__lowercase ) # Compute metric and head importance again _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase ) _A = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)" , __lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("Final head mask" ) print_ad_tensor(__lowercase ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase ) _A = 1 / loss _A = datetime.now() - before_time _A = sum(p.numel() for p in model.parameters() ) _A = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowercase , __lowercase ): _A = [ v, ] assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowercase ) _A = sum(p.numel() for p in model.parameters() ) _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase , actually_pruned=__lowercase , ) _A = 1 / loss _A = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __lowercase , __lowercase , pruned_num_params / original_num_params * 100 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase ) logger.info("Pruning: speed 
ratio (original timing / new timing): %f percents" , original_time / new_time * 100 ) save_model(__lowercase , args.output_dir ) def __lowercase ( ) -> Union[str, Any]: '''simple docstring''' _A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , ) parser.add_argument( "--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , ) # Other parameters parser.add_argument( "--config_name" , default="" , type=__lowercase , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=__lowercase , type=__lowercase , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." ) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=__lowercase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=__lowercase , help="Amount to heads to masking at each masking step." ) parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=128 , type=__lowercase , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." ) , ) parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." ) parser.add_argument("--seed" , type=__lowercase , default=42 ) parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" ) parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." 
) _A = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) _A = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _A = torch.device("cuda" , args.local_rank ) _A = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _A = nn.parallel.DistributedDataParallel( __lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowercase ) elif args.n_gpu > 1: _A = nn.DataParallel(__lowercase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowercase ) torch.save(__lowercase , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , __lowercase ) # Prepare dataset _A = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _A = (torch.from_numpy(__lowercase ),) _A = TensorDataset(*__lowercase ) _A = RandomSampler(__lowercase ) _A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowercase , __lowercase , __lowercase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _A = mask_heads(__lowercase , __lowercase , __lowercase ) prune_heads(__lowercase , __lowercase , __lowercase , __lowercase ) if __name__ == "__main__": main()
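# Quick numeric check of the entropy helper the script above relies on (the
# mangled `_A = 0` line presumably stands in for masking p == 0 entries before
# the sum): a uniform attention row over k entries scores ln(k), a one-hot
# row scores 0, which is why low-entropy heads look specialized.
import torch

def entropy(p: torch.Tensor) -> torch.Tensor:
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log 0 would otherwise produce NaN
    return -plogp.sum(dim=-1)

print(entropy(torch.full((4,), 0.25)))         # tensor(1.3863) == ln 4
print(entropy(torch.tensor([1.0, 0.0, 0.0])))  # tensor(0.)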
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = CycleDiffusionPipeline snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A = CLIPTextModel(__UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ): '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _A = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith("mps" ): _A = torch.manual_seed(__UpperCAmelCase ) else: _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _A = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = 
self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.get_dummy_components() for name, module in components.items(): if hasattr(__UpperCAmelCase , "half" ): _A = module.half() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Any ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained( __UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) _A = init_image.resize((512, 512) ) _A 
= "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring''' import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = ['''image_processor''', '''tokenizer'''] snake_case = '''AutoImageProcessor''' snake_case = '''AutoTokenizer''' def __init__( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : str=None , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , __UpperCAmelCase , ) _A = kwargs.pop("feature_extractor" ) _A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) _A = self.image_processor _A = False def __call__( self : int , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Dict ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase ) _A = kwargs.pop("images" , __UpperCAmelCase ) _A = kwargs.pop("text" , __UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: _A = args[0] _A = args[1:] if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: _A = self.image_processor(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) if text is not None: _A = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _A = encodings["input_ids"] return inputs def lowerCAmelCase ( self : List[str] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Any ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : str , **__UpperCAmelCase : Dict ): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @contextmanager def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your images inputs, or in a separate call." 
) _A = True _A = self.tokenizer yield _A = self.image_processor _A = False def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : str=None ): '''simple docstring''' if added_vocab is None: _A = self.tokenizer.get_added_vocab() _A = {} while tokens: _A = re.search(R"<s_(.*?)>" , __UpperCAmelCase , re.IGNORECASE ) if start_token is None: break _A = start_token.group(1 ) _A = re.search(Rf'''</s_{key}>''' , __UpperCAmelCase , re.IGNORECASE ) _A = start_token.group() if end_token is None: _A = tokens.replace(__UpperCAmelCase , "" ) else: _A = end_token.group() _A = re.escape(__UpperCAmelCase ) _A = re.escape(__UpperCAmelCase ) _A = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , __UpperCAmelCase , re.IGNORECASE ) if content is not None: _A = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _A = self.tokenajson(__UpperCAmelCase , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase ) if value: if len(__UpperCAmelCase ) == 1: _A = value[0] _A = value else: # leaf nodes _A = [] for leaf in content.split(R"<sep/>" ): _A = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _A = leaf[1:-2] # for categorical special tokens output[key].append(__UpperCAmelCase ) if len(output[key] ) == 1: _A = output[key][0] _A = tokens[tokens.find(__UpperCAmelCase ) + len(__UpperCAmelCase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCAmelCase , added_vocab=__UpperCAmelCase ) if len(__UpperCAmelCase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , ) return self.image_processor_class @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , ) return self.image_processor
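# A hedged illustration of the tag-to-JSON conversion implemented above (the
# mangled `tokenajson` corresponds to token2json in the real library): nested
# <s_key>...</s_key> spans become nested dict keys, and <sep/> splits lists.
# The token string is illustrative.
tokens = "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
# processor.token2json(tokens) would return:
# {"menu": {"name": "latte", "price": "4.50"}}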
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
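# --- Hedged sketch of the lazy-import pattern used above (illustrative only;
# transformers' real _LazyModule is more involved). The idea: declare the import
# structure up front and only import a submodule when one of its symbols is
# first requested, e.g. via module-level __getattr__ (PEP 562):
import importlib


def _lazy_getattr_sketch(import_structure, name):
    """Resolve `name` from the submodule that declares it, importing on demand."""
    for module_name, symbols in import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)


# e.g. json is only imported at the moment "dumps" is looked up:
assert _lazy_getattr_sketch({"json": ["dumps"]}, "dumps")({"a": 1}) == '{"a": 1}'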
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class _UpperCAmelCase : """simple docstring""" snake_case = '''dummy_data''' snake_case = '''datasets''' snake_case = False def __init__( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Union[Version, str] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[List[Callable]] = None , ): '''simple docstring''' _A = 0 _A = dataset_name _A = cache_dir _A = use_local_dummy_data _A = config # download_callbacks take a single url as input _A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _A = str(__UpperCAmelCase ) # to be downloaded _A = None _A = None @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self._dummy_file is None: _A = self.download_dummy_data() return self._dummy_file @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _A = cached_path( __UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase ) return os.path.join(__UpperCAmelCase , self.dummy_file_name ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' if self._bucket_url is None: _A = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def lowerCAmelCase ( self : str ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] , *__UpperCAmelCase : Dict ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _A = self.dummy_file_name # special case when data_url is a dict if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , (list, tuple) ): return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase ) else: return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : Any ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : List[str] ): '''simple docstring''' return path def lowerCAmelCase ( self : str ): '''simple docstring''' return {} def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): for single_url in single_urls: download_callback(__UpperCAmelCase ) else: _A = single_urls download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls] else: _A = single_urls _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) _A = value # make sure that values are unique if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __UpperCAmelCase ) ) for url in data_url ) _A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _A = [data_url[0]] * len(__UpperCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__UpperCAmelCase ) return dummy_data_list def 
lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : Any , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' def _iter_archive_members(__UpperCAmelCase : List[Any] ): # this preserves the order of the members inside the ZIP archive _A = Path(self.dummy_file ).parent _A = path.relative_to(__UpperCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__UpperCAmelCase ) _A = Path(__UpperCAmelCase ) _A = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("rb" ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [paths] for path in paths: if os.path.isfile(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__UpperCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
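# --- Hedged illustration (the URL value is mine): the dict/list helpers above
# key each dummy file by the url-quoted last path segment of its URL, so query
# strings survive as part of the local file name:
import urllib.parse as _up_sketch
from pathlib import Path as _Path_sketch

_url = "https://example.com/data/train.csv?version=2"
# Path(...).name keeps "train.csv?version=2"; quote_plus escapes "?" and "="
assert _up_sketch.quote_plus(_Path_sketch(_url).name) == "train.csv%3Fversion%3D2"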
'''simple docstring'''
import operator as op

lowerCamelCase_ = '''scaler.pt'''
lowerCamelCase_ = '''pytorch_model'''
lowerCamelCase_ = '''random_states'''
lowerCamelCase_ = '''optimizer'''
lowerCamelCase_ = '''scheduler'''
lowerCamelCase_ = '''pytorch_model.bin'''
lowerCamelCase_ = '''pytorch_model.bin.index.json'''
lowerCamelCase_ = '''model.safetensors'''
lowerCamelCase_ = '''model.safetensors.index.json'''
lowerCamelCase_ = '''1.10.2'''
lowerCamelCase_ = '''py38'''
lowerCamelCase_ = '''4.17.0'''
lowerCamelCase_ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
lowerCamelCase_ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
lowerCamelCase_ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
lowerCamelCase_ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
lowerCamelCase_ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
lowerCamelCase_ = '''2.0.1'''
lowerCamelCase_ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
lowerCamelCase_ = ['''default''', '''reduce-overhead''', '''max-autotune''']
lowerCamelCase_ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCamelCase_ = [
    '''nnodes''',
    '''nproc_per_node''',
    '''rdzv_backend''',
    '''rdzv_endpoint''',
    '''rdzv_id''',
    '''rdzv_conf''',
    '''standalone''',
    '''max_restarts''',
    '''monitor_interval''',
    '''start_method''',
    '''role''',
    '''module''',
    '''m''',
    '''no_python''',
    '''run_path''',
    '''log_dir''',
    '''r''',
    '''redirects''',
    '''t''',
    '''tee''',
    '''node_rank''',
    '''master_addr''',
    '''master_port''',
]
lowerCamelCase_ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
lowerCamelCase_ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
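# --- Hedged usage sketch (added; the helper name and the packaging dependency
# are my assumptions, not taken from the file above). The operator table maps
# comparison strings to functions, which supports version guards like:
import operator as _op_sketch

from packaging import version as _version_sketch

_STR_TO_OP = {">": _op_sketch.gt, ">=": _op_sketch.ge, "==": _op_sketch.eq, "!=": _op_sketch.ne, "<=": _op_sketch.le, "<": _op_sketch.lt}


def _compare_versions_sketch(v: str, operation: str, reference: str) -> bool:
    """Return True when `v <operation> reference` holds for the parsed versions."""
    return _STR_TO_OP[operation](_version_sketch.parse(v), _version_sketch.parse(reference))


assert _compare_versions_sketch("2.0.1", ">=", "1.10.2")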
'''simple docstring'''
from typing import Any, Union


def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Union[str, Any]:
    '''simple docstring'''
    if isinstance(__lowercase , __lowercase ) and isinstance(__lowercase , __lowercase ):
        _A = len(set_a.intersection(__lowercase ) )
        if alternative_union:
            _A = len(__lowercase ) + len(__lowercase )
        else:
            _A = len(set_a.union(__lowercase ) )
        return intersection / union
    if isinstance(__lowercase , (list, tuple) ) and isinstance(__lowercase , (list, tuple) ):
        _A = [element for element in set_a if element in set_b]
        if alternative_union:
            _A = len(__lowercase ) + len(__lowercase )
            return len(__lowercase ) / union
        else:
            _A = set_a + [element for element in set_b if element not in set_a]
            return len(__lowercase ) / len(__lowercase )
    return None


if __name__ == "__main__":
    lowerCamelCase_ = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    lowerCamelCase_ = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
    print(jaccard_similarity(set_a, set_b))
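# --- Worked check (numbers derived from the __main__ example above):
# |A ∩ B| = 3 ({c, d, e}) and |A ∪ B| = 8, so the similarity is 3/8 = 0.375;
# with alternative_union the denominator is |A| + |B| = 5 + 6 = 11, giving 3/11.
_a_sketch = {"a", "b", "c", "d", "e"}
_b_sketch = {"c", "d", "e", "f", "h", "i"}
assert len(_a_sketch & _b_sketch) / len(_a_sketch | _b_sketch) == 3 / 8
assert len(_a_sketch & _b_sketch) / (len(_a_sketch) + len(_b_sketch)) == 3 / 11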
'''simple docstring'''
from typing import Any


def __lowercase ( __lowercase ) -> list[list[int]]:
    '''simple docstring'''
    _A = []
    if len(__lowercase ) == 1:
        return [nums.copy()]
    for _ in range(len(__lowercase ) ):
        _A = nums.pop(0 )
        _A = permute(__lowercase )
        for perm in permutations:
            perm.append(__lowercase )
        result.extend(__lowercase )
        nums.append(__lowercase )
    return result


def __lowercase ( __lowercase ) -> Any:
    '''simple docstring'''

    def backtrack(__lowercase ):
        if start == len(__lowercase ) - 1:
            output.append(nums[:] )
        else:
            for i in range(__lowercase , len(__lowercase ) ):
                _A , _A = nums[i], nums[start]
                backtrack(start + 1 )
                _A , _A = nums[i], nums[start]  # backtrack

    _A = []
    backtrack(0 )
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    lowerCamelCase_ = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
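# --- Quick cross-check (hedged; it uses only the standard library, since the
# renamed helpers above may not resolve as written in this dump): three
# distinct elements must yield 3! = 6 orderings.
import itertools as _it_sketch

_expected = [list(p) for p in _it_sketch.permutations([1, 2, 3])]
assert len(_expected) == 6
assert [1, 2, 3] in _expected and [3, 2, 1] in _expected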
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 0 snake_case = False snake_case = 3.0 class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} ) self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {"a": 2, "b": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} ) @require_cuda def lowerCAmelCase ( self : int ): '''simple docstring''' _A = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _A = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _A = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , __UpperCAmelCase ) @require_multi_gpu def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": lowerCamelCase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCamelCase_ = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCamelCase_ = torch.nn.Linear(1_00, 2_00) lowerCamelCase_ = accelerator.prepare(model) # Check the values changed in kwargs lowerCamelCase_ = '''''' lowerCamelCase_ = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
'''simple docstring''' from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[Any]=13 , __UpperCAmelCase : List[str]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : int=True , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Tuple="gelu" , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : Union[str, Any]=16 , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : int=False , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]="None" , __UpperCAmelCase : Optional[Any]=3 , __UpperCAmelCase : str=4 , __UpperCAmelCase : int=None , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = relative_attention _A = position_biased_input _A = pos_att_type _A = scope def lowerCAmelCase ( self : int ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , 
__UpperCAmelCase : int ): '''simple docstring''' _A = TFDebertaVaModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ): '''simple docstring''' _A = TFDebertaVaForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any ): '''simple docstring''' _A = self.num_labels _A = TFDebertaVaForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ): '''simple docstring''' _A = self.num_labels _A = TFDebertaVaForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = TFDebertaVaForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFDebertaVaModel, '''fill-mask''': TFDebertaVaForMaskedLM, '''question-answering''': TFDebertaVaForQuestionAnswering, '''text-classification''': 
TFDebertaVaForSequenceClassification, '''token-classification''': TFDebertaVaForTokenClassification, '''zero-shot''': TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int ): '''simple docstring''' _A = TFDebertaVaModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="Model not available yet" ) def lowerCAmelCase ( self : str ): '''simple docstring''' pass @slow def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" ) _A = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _A = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 )
'''simple docstring'''
def __lowercase ( __lowercase = 100 ) -> int:
    '''simple docstring'''
    _A = n * (n + 1) * (2 * n + 1) / 6
    _A = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )


if __name__ == "__main__":
    print(F"""{solution() = }""")
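# --- Worked check: for n = 10 the sum of the squares is 385 and the square of
# the sum is 55 ** 2 = 3025, so the difference is 3025 - 385 = 2640 — matching
# the closed forms above (and the classic Project Euler #6 example).
assert 10 * 11 * 21 // 6 == 385
assert (10 * 11 // 2) ** 2 == 3025
assert 3025 - 385 == 2640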
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def __lowercase ( __lowercase ) -> Dict: '''simple docstring''' if isinstance(__lowercase , collections.abc.Iterable ): return x return (x, x) @require_flax class _UpperCAmelCase : """simple docstring""" def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' pass def lowerCAmelCase ( self : List[str] ): '''simple docstring''' pass def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : float ): '''simple docstring''' _A = np.abs((a - b) ).max() self.assertLessEqual(__UpperCAmelCase , __UpperCAmelCase , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int=None , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase ) _A = FlaxVisionTextDualEncoderModel(__UpperCAmelCase ) _A = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Dict ): '''simple docstring''' _A , _A = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) _A = {"vision_model": vision_model, "text_model": text_model} _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase ) _A = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : str ): '''simple docstring''' _A , _A = 
self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) _A = {"vision_model": vision_model, "text_model": text_model} _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase ) _A = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) _A = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase ) _A = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase ) _A = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) _A = after_output[0] _A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCAmelCase , 1E-3 ) def lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str=None , **__UpperCAmelCase : List[Any] ): '''simple docstring''' _A , _A = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) _A = {"vision_model": vision_model, "text_model": text_model} _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase ) _A = model( input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase ) _A = output.vision_model_output.attentions self.assertEqual(len(__UpperCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _A = to_atuple(vision_model.config.image_size ) _A = to_atuple(vision_model.config.patch_size ) _A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _A = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _A = output.text_model_output.attentions self.assertEqual(len(__UpperCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' pt_model.to(__UpperCAmelCase ) pt_model.eval() # prepare inputs _A = inputs_dict _A = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _A = pt_model(**__UpperCAmelCase ).to_tuple() _A = fx_model(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(__UpperCAmelCase , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__UpperCAmelCase ) _A = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) _A = fx_model_loaded(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(__UpperCAmelCase , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__UpperCAmelCase ) _A = VisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase ) pt_model_loaded.to(__UpperCAmelCase ) pt_model_loaded.eval() with 
torch.no_grad(): _A = pt_model_loaded(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(__UpperCAmelCase , pt_output_loaded.numpy() , 4E-2 ) def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str ): '''simple docstring''' _A = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase ) _A = VisionTextDualEncoderModel(__UpperCAmelCase ) _A = FlaxVisionTextDualEncoderModel(__UpperCAmelCase ) _A = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase ) _A = fx_state self.check_pt_flax_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] ): '''simple docstring''' _A = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase ) _A = VisionTextDualEncoderModel(__UpperCAmelCase ) _A = FlaxVisionTextDualEncoderModel(__UpperCAmelCase ) _A = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params ) self.check_pt_flax_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() self.check_save_load(**__UpperCAmelCase ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__UpperCAmelCase ) @is_pt_flax_cross_test def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.prepare_config_and_inputs() _A = config_inputs_dict.pop("vision_config" ) _A = config_inputs_dict.pop("text_config" ) _A = config_inputs_dict self.check_equivalence_pt_to_flax(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.check_equivalence_flax_to_pt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @slow def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A , _A = self.get_pretrained_model_and_inputs() _A = model_a(**__UpperCAmelCase ) _A = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__UpperCAmelCase ) _A = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase ) _A = model_a(**__UpperCAmelCase ) _A = after_outputs[0] _A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCAmelCase , 1E-5 ) @require_flax class _UpperCAmelCase ( snake_case_ , unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCAmelCase , text_from_pt=__UpperCAmelCase , ) _A = 13 _A = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _A = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) 
_A = random_attention_mask([batch_size, 4] ) _A = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = FlaxViTModel(__UpperCAmelCase ) _A = FlaxBertModel(__UpperCAmelCase ) return vision_model, text_model def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = FlaxViTModelTester(self ) _A = FlaxBertModelTester(self ) _A = vit_model_tester.prepare_config_and_inputs() _A = bert_model_tester.prepare_config_and_inputs() _A , _A = vision_config_and_inputs _A , _A , _A , _A = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class _UpperCAmelCase ( snake_case_ , unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCAmelCase , text_from_pt=__UpperCAmelCase , ) _A = 13 _A = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _A = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _A = random_attention_mask([batch_size, 4] ) _A = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict ): '''simple docstring''' _A = FlaxCLIPVisionModel(__UpperCAmelCase ) _A = FlaxBertModel(__UpperCAmelCase ) return vision_model, text_model def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = FlaxCLIPVisionModelTester(self ) _A = FlaxBertModelTester(self ) _A = clip_model_tester.prepare_config_and_inputs() _A = bert_model_tester.prepare_config_and_inputs() _A , _A = vision_config_and_inputs _A , _A , _A , _A = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : int ): '''simple docstring''' _A = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 ) _A = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) _A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _A = processor( text=["una foto di un gatto", "una foto di un cane"] , images=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="np" ) _A = model(**__UpperCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _A = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , __UpperCAmelCase , atol=1E-3 ) )
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , 
train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
'''simple docstring''' import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 0 snake_case = False snake_case = 3.0 class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} ) self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {"a": 2, "b": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} ) @require_cuda def lowerCAmelCase ( self : int ): '''simple docstring''' _A = GradScalerKwargs(init_scale=1024 , growth_factor=2 ) AcceleratorState._reset_state() _A = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _A = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2000 ) self.assertEqual(scaler._enabled , __UpperCAmelCase ) @require_multi_gpu def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": lowerCamelCase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) lowerCamelCase_ = Accelerator(kwargs_handlers=[ddp_scaler]) lowerCamelCase_ = torch.nn.Linear(1_00, 2_00) lowerCamelCase_ = accelerator.prepare(model) # Check the values changed in kwargs lowerCamelCase_ = '''''' lowerCamelCase_ = model.bucket_bytes_cap // (10_24 * 10_24) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''', # See all CANINE models at https://huggingface.co/models?filter=canine } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''canine''' def __init__( self : Dict , __UpperCAmelCase : List[str]=768 , __UpperCAmelCase : str=12 , __UpperCAmelCase : Union[str, Any]=12 , __UpperCAmelCase : int=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : List[Any]=16384 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : Dict=1E-12 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : int=0xE000 , __UpperCAmelCase : List[Any]=0xE001 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : List[str]=8 , __UpperCAmelCase : int=16384 , __UpperCAmelCase : Union[str, Any]=128 , **__UpperCAmelCase : Dict , ): '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _A = max_position_embeddings _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = initializer_range _A = type_vocab_size _A = layer_norm_eps # Character config: _A = downsampling_rate _A = upsampling_kernel_size _A = num_hash_functions _A = num_hash_buckets _A = local_transformer_stride
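# Minimal usage sketch for the configuration class above, assuming the public
# `transformers` API where it is exposed as `CanineConfig` (hypothetical
# values, shown for illustration only):
#
#   from transformers import CanineConfig, CanineModel
#
#   config = CanineConfig(hidden_size=768, num_hidden_layers=12)
#   model = CanineModel(config)  # randomly initialized CANINE encoder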
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowerCamelCase_ = logging.getLogger(__name__) lowerCamelCase_ = 50 # max width of layer names lowerCamelCase_ = 70 # max width of quantizer names def __lowercase ( __lowercase ) -> int: '''simple docstring''' _A = parser.add_argument_group("quant_trainer arguments" ) group.add_argument("--wprec" , type=__lowercase , default=8 , help="weight precision" ) group.add_argument("--aprec" , type=__lowercase , default=8 , help="activation precision" ) group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" ) group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" ) group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" ) group.add_argument("--quant-disable-keyword" , type=__lowercase , nargs="+" , help="disable quantizers by keyword" ) group.add_argument("--quant-disable-layer-module" , type=__lowercase , help="disable quantizers by keyword under layer." ) group.add_argument("--quant-enable-layer-module" , type=__lowercase , help="enable quantizers by keyword under layer" ) group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" ) group.add_argument("--percentile" , default=__lowercase , type=__lowercase , help="percentile for PercentileCalibrator" ) group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" ) group.add_argument("--clip-gelu" , metavar="N" , type=__lowercase , help="clip gelu output maximum value to N" ) group.add_argument( "--recalibrate-weights" , action="store_true" , help=( "recalibrate weight amaxes by taking the max of the weights." " amaxes will be computed with the current quantization granularity (axis)." ) , ) def __lowercase ( __lowercase ) -> Tuple: '''simple docstring''' if args.calibrator == "max": _A = "max" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("Specify --percentile when using percentile calibrator" ) _A = "histogram" elif args.calibrator == "mse": _A = "histogram" else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=__lowercase ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(__lowercase ) quant_nn.QuantLinear.set_default_quant_desc_weight(__lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase=False , __lowercase=False ) -> Dict: '''simple docstring''' logger.info("Configuring Model for Quantization" ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(__lowercase , ["embeddings"] , which="weight" , _disabled=__lowercase ) if args.quant_disable: set_quantizer_by_name(__lowercase , [""] , _disabled=__lowercase ) if args.quant_disable_keyword: set_quantizer_by_name(__lowercase , args.quant_disable_keyword , _disabled=__lowercase ) if args.quant_disable_layer_module: set_quantizer_by_name(__lowercase , [R"layer.\d+." + args.quant_disable_layer_module] , _disabled=__lowercase ) if args.quant_enable_layer_module: set_quantizer_by_name(__lowercase , [R"layer.\d+." 
+ args.quant_enable_layer_module] , _disabled=__lowercase ) if args.recalibrate_weights: recalibrate_weights(__lowercase ) if args.fuse_qkv: fuse_qkv(__lowercase , __lowercase ) if args.clip_gelu: clip_gelu(__lowercase , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(__lowercase ) def __lowercase ( __lowercase ) -> Tuple: '''simple docstring''' logger.info("Enabling Calibration" ) for name, module in model.named_modules(): if name.endswith("_quantizer" ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def __lowercase ( __lowercase , __lowercase ) -> Optional[Any]: '''simple docstring''' logger.info("Loading calibrated amax" ) for name, module in model.named_modules(): if name.endswith("_quantizer" ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax("percentile" , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(__lowercase ) def __lowercase ( __lowercase , __lowercase ) -> Union[str, Any]: '''simple docstring''' def fusea(__lowercase , __lowercase , __lowercase ): for mod in [qq, qk, qv]: if not hasattr(__lowercase , "_amax" ): print(" WARNING: NO AMAX BUFFER" ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(__lowercase , __lowercase , __lowercase ) qq._amax.fill_(__lowercase ) qk._amax.fill_(__lowercase ) qv._amax.fill_(__lowercase ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith(".attention.self" ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def __lowercase ( __lowercase , __lowercase ) -> Optional[Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=__lowercase ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def __lowercase ( __lowercase ) -> int: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(__lowercase , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(__lowercase , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def __lowercase ( __lowercase ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(__lowercase , "_weight_quantizer" ): if not hasattr(mod.weight_quantizer , "_amax" ): print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__lowercase , keepdims=__lowercase ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def __lowercase ( __lowercase , __lowercase=25 , __lowercase=180 , __lowercase=None ) -> int: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(__lowercase , __lowercase ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(__lowercase , "weight" ): continue _A = max(__lowercase , len(__lowercase ) ) for name, mod in model.named_modules(): _A = getattr(__lowercase , "_input_quantizer" , __lowercase ) _A = getattr(__lowercase , "_weight_quantizer" , __lowercase ) if not hasattr(__lowercase , "weight" ): continue if type(__lowercase ) in ignore: continue if [True for s in ignore if type(__lowercase ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(__lowercase ) <= line_width: logger.info(__lowercase ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{' ':{name_width}} {wgt_str}''' ) def __lowercase ( __lowercase ) -> List[str]: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(__lowercase , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Optional[int]: '''simple docstring''' _A = getattr(__lowercase , __lowercase , __lowercase ) if quantizer_mod is not None: assert hasattr(__lowercase , __lowercase ) setattr(__lowercase , __lowercase , __lowercase ) else: logger.warning(F'''{name} has no {quantizer}''' ) def __lowercase ( __lowercase , __lowercase , __lowercase="both" , **__lowercase ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(__lowercase , __lowercase , "_input_quantizer" , __lowercase , __lowercase ) if which in ["weight", "both"]: set_quantizer(__lowercase , __lowercase , "_weight_quantizer" , __lowercase , __lowercase ) logger.info(__lowercase ) def __lowercase ( __lowercase , __lowercase , **__lowercase ) -> Optional[int]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(__lowercase , "_input_quantizer" ) or hasattr(__lowercase , "_weight_quantizer" ): for n in names: if re.search(__lowercase , __lowercase ): set_quantizers(__lowercase , __lowercase , **__lowercase ) elif name.endswith("_quantizer" ): for n in names: if re.search(__lowercase , __lowercase ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(__lowercase , __lowercase , __lowercase ) logger.info(__lowercase )
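# Illustrative calibration workflow for the quantization helpers above (a
# sketch only; the descriptive function names are assumptions, since every
# def in this module carries the same obfuscated name):
#
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)              # registers --wprec/--aprec/--calibrator ...
#   args = parser.parse_args()
#   set_default_quantizers(args)       # installs the QuantDescriptor defaults
#   configure_model(model, args, calib=True)
#   enable_calibration(model)          # collect statistics during forward passes
#   # ... run a few forward passes on calibration data ...
#   finish_calibration(model, args)    # load amaxes, re-enable quantization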
'''simple docstring''' class _UpperCAmelCase : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : list[int] ): '''simple docstring''' _A = len(__UpperCAmelCase ) _A = [0] * len_array if len_array > 0: _A = array[0] for i in range(1 , __UpperCAmelCase ): _A = self.prefix_sum[i - 1] + array[i] def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ): '''simple docstring''' _A = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCAmelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
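# Worked example of the prefix-sum trick above: for array = [1, 2, 3, 4] the
# prefix sums are [1, 3, 6, 10], so the range sum over indices 1..2 is
# prefix[2] - prefix[0] = 6 - 1 = 5 (= 2 + 3), answered in O(1) per query.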
'''simple docstring''' import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor lowerCamelCase_ = logging.get_logger(__name__) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' warnings.warn( "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use DeformableDetrImageProcessor instead." , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
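# Migration sketch for callers of the deprecated class above (the checkpoint
# name is an assumption, shown for illustration only):
#
#   from transformers import DeformableDetrImageProcessor
#
#   processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")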
'''simple docstring''' from typing import List import numpy as np def __lowercase ( __lowercase ) -> int: '''simple docstring''' _A = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) _A = max(lists_lengths.values() , default=0 ) return max(1 , __lowercase ) def __lowercase ( __lowercase , __lowercase ) -> List[range]: '''simple docstring''' _A = [] for group_idx in range(__lowercase ): _A = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break _A = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 _A = range(__lowercase , start + num_shards_to_add ) shards_indices_per_group.append(__lowercase ) return shards_indices_per_group def __lowercase ( __lowercase , __lowercase ) -> List[dict]: '''simple docstring''' _A = _number_of_shards_in_gen_kwargs(__lowercase ) if num_shards == 1: return [dict(__lowercase )] else: _A = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__lowercase , __lowercase ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__lowercase ) ) ] def __lowercase ( __lowercase ) -> dict: '''simple docstring''' return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __lowercase ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def __lowercase ( __lowercase , __lowercase ) -> dict: '''simple docstring''' _A = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )} _A = {} for size in list_sizes: _A = list(range(__lowercase ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes _A = dict(__lowercase ) for key, value in shuffled_kwargs.items(): if isinstance(__lowercase , __lowercase ): _A = [value[i] for i in indices_per_size[len(__lowercase )]] return shuffled_kwargs
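# Worked example of the shard distribution above: _distribute_shards with
# num_shards=10 and max_num_jobs=3 assigns 10 // 3 = 3 shards per job plus one
# extra shard to the first 10 % 3 = 1 job, giving
# [range(0, 4), range(4, 7), range(7, 10)].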
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', 
'''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) lowerCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def __lowercase ( __lowercase ) -> Optional[int]: '''simple docstring''' for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: _A = model_type_to_module_name(__lowercase ) _A = importlib.import_module(F'''.{module_name}''' , "transformers.models" ) try: return getattr(__lowercase , __lowercase ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(__lowercase , "__name__" , __lowercase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A = importlib.import_module("transformers" ) if hasattr(__lowercase , __lowercase ): return getattr(__lowercase , __lowercase ) return None def __lowercase ( __lowercase , __lowercase = None , __lowercase = False , __lowercase = False , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = False , **__lowercase , ) -> Tuple: '''simple docstring''' _A = get_file_from_repo( __lowercase , __lowercase , cache_dir=__lowercase , force_download=__lowercase , resume_download=__lowercase , proxies=__lowercase , use_auth_token=__lowercase , revision=__lowercase , local_files_only=__lowercase , ) if resolved_config_file is None: logger.info( "Could not locate the image processor configuration file, will try to use the model config instead." ) return {} with open(__lowercase , encoding="utf-8" ) as reader: return json.load(__lowercase ) class _UpperCAmelCase : """simple docstring""" def __init__( self : int ): '''simple docstring''' raise EnvironmentError( "AutoImageProcessor is designed to be instantiated " "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." ) @classmethod @replace_list_option_in_docstrings(__UpperCAmelCase ) def lowerCAmelCase ( cls : List[str] , __UpperCAmelCase : str , **__UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = kwargs.pop("config" , __UpperCAmelCase ) _A = kwargs.pop("trust_remote_code" , __UpperCAmelCase ) _A = True _A , _A = ImageProcessingMixin.get_image_processor_dict(__UpperCAmelCase , **__UpperCAmelCase ) _A = config_dict.get("image_processor_type" , __UpperCAmelCase ) _A = None if "AutoImageProcessor" in config_dict.get("auto_map" , {} ): _A = config_dict["auto_map"]["AutoImageProcessor"] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: _A = config_dict.pop("feature_extractor_type" , __UpperCAmelCase ) if feature_extractor_class is not None: logger.warning( "Could not find image processor class in the image processor config or the model config. Loading" " based on pattern matching with the model's feature extractor configuration." 
) _A = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" ) if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ): _A = config_dict["auto_map"]["AutoFeatureExtractor"] _A = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" ) logger.warning( "Could not find image processor auto map in the image processor config or the model config." " Loading based on pattern matching with the model's feature extractor configuration." ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = AutoConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) # It could be in `config.image_processor_type`` _A = getattr(__UpperCAmelCase , "image_processor_type" , __UpperCAmelCase ) if hasattr(__UpperCAmelCase , "auto_map" ) and "AutoImageProcessor" in config.auto_map: _A = config.auto_map["AutoImageProcessor"] if image_processor_class is not None: _A = image_processor_class_from_name(__UpperCAmelCase ) _A = image_processor_auto_map is not None _A = image_processor_class is not None or type(__UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING _A = resolve_trust_remote_code( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if has_remote_code and trust_remote_code: _A = get_class_from_dynamic_module( __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) _A = kwargs.pop("code_revision" , __UpperCAmelCase ) if os.path.isdir(__UpperCAmelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(__UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING: _A = IMAGE_PROCESSOR_MAPPING[type(__UpperCAmelCase )] return image_processor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) raise ValueError( f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowerCAmelCase ( __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' IMAGE_PROCESSOR_MAPPING.register(__UpperCAmelCase , __UpperCAmelCase )
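# Minimal usage sketch for the class above, assuming the public `transformers`
# API where it is exposed as `AutoImageProcessor` (the checkpoint name is an
# illustrative assumption):
#
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=image, return_tensors="pt")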
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument('''--user''', type=str, default='''ubuntu''') parser.add_argument('''--host''', type=str, default='''localhost''') parser.add_argument('''--key_path''', type=str, default=None) parser.add_argument('''--instance''', type=str, default='''V100:1''') parser.add_argument('''--provider''', type=str, default='''cheapest''') parser.add_argument('''--use_spot''', type=bool, default=False) parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''') lowerCamelCase_ , lowerCamelCase_ = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('''Cannot specify both BYO and on-demand cluster args''') lowerCamelCase_ = rh.cluster( name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path} ) else: lowerCamelCase_ = rh.cluster( name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) lowerCamelCase_ = args.example.rsplit('''/''', 1)[0] # Set up remote environment cluster.install_packages(['''pip:./''']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""]) cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117''']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
'''simple docstring''' from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ): '''simple docstring''' super().__init__() _A = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase ) else: _A = None _A = torch.nn.Parameter(__UpperCAmelCase ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ): '''simple docstring''' _A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1 # get prompt text embeddings _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) _A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) _A = text_input_ids[:, : self.tokenizer.model_max_length] _A = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate text embeddings for each generation per prompt _A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _A = self.learned_classifier_free_sampling_embeddings.embeddings _A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 ) else: _A = [""] * batch_size _A = text_input_ids.shape[-1] _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , ) _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _A = negative_prompt_embeds.shape[1] _A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 ) _A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = 1 elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = len(__UpperCAmelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' ) _A = batch_size * num_images_per_prompt _A = guidance_scale > 1.0 _A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(__UpperCAmelCase )}.''' ) # get the initial completely masked latents unless the user supplied it _A = (batch_size, self.transformer.num_latent_pixels) if latents is None: _A = self.transformer.num_vector_embeds - 1 _A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0," f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) _A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device ) _A = self.scheduler.timesteps.to(self.device ) _A = latents for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ): # expand the sample if we are doing classifier free guidance _A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample if do_classifier_free_guidance: _A , _A = model_output.chunk(2 ) _A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase ) _A = self.truncate(__UpperCAmelCase , __UpperCAmelCase ) # remove `log(0)`'s (`-inf`s) _A = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = self.vqvae.config.vq_embed_dim _A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase ) _A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample _A = (image / 2 + 0.5).clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ): '''simple docstring''' _A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase ) _A = torch.exp(__UpperCAmelCase ) _A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase ) _A = torch.cat((all_true, keep_mask) , dim=1 ) _A = keep_mask[:, :-1, :] _A = keep_mask.gather(1 , indices.argsort(1 ) ) _A = log_p_x_0.clone() _A = -torch.inf # -inf = log(0) return rv
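# Minimal usage sketch for the pipeline above, assuming the public `diffusers`
# API where it is exposed as `VQDiffusionPipeline` (checkpoint name and prompt
# are illustrative assumptions):
#
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]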
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = TypeVar('''DatasetType''', Dataset, IterableDataset) def __lowercase ( __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = "first_exhausted" , ) -> DatasetType: '''simple docstring''' from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(__lowercase ): if not isinstance(__lowercase , (Dataset, IterableDataset) ): if isinstance(__lowercase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' "is an empty dataset dictionary." ) raise ValueError( F'''Dataset at position {i} has at least one split: {list(__lowercase )}\n''' F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__lowercase ) )}\']''' ) raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowercase ).__name__}.''' ) if i == 0: _A , _A = ( (Dataset, IterableDataset) if isinstance(__lowercase , __lowercase ) else (IterableDataset, Dataset) ) elif not isinstance(__lowercase , __lowercase ): raise ValueError( F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''' ) if dataset_type is Dataset: return _interleave_map_style_datasets( __lowercase , __lowercase , __lowercase , info=__lowercase , split=__lowercase , stopping_strategy=__lowercase ) else: return _interleave_iterable_datasets( __lowercase , __lowercase , __lowercase , info=__lowercase , split=__lowercase , stopping_strategy=__lowercase ) def __lowercase ( __lowercase , __lowercase = None , __lowercase = None , __lowercase = 0 , ) -> DatasetType: '''simple docstring''' if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(__lowercase ): if not isinstance(__lowercase , (Dataset, IterableDataset) ): if isinstance(__lowercase , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} ''' "is an empty dataset dictionary." 
) raise ValueError( F'''Dataset at position {i} has at least one split: {list(__lowercase )}\n''' F'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(__lowercase ) )}\']''' ) raise ValueError( F'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__lowercase ).__name__}.''' ) if i == 0: _A , _A = ( (Dataset, IterableDataset) if isinstance(__lowercase , __lowercase ) else (IterableDataset, Dataset) ) elif not isinstance(__lowercase , __lowercase ): raise ValueError( F'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__lowercase , info=__lowercase , split=__lowercase , axis=__lowercase ) else: return _concatenate_iterable_datasets(__lowercase , info=__lowercase , split=__lowercase , axis=__lowercase )
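# Minimal usage sketch for the two helpers above, assuming the public
# `datasets` API where they are exposed as `interleave_datasets` and
# `concatenate_datasets`:
#
#   from datasets import Dataset, interleave_datasets
#
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets(
#       [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="first_exhausted"
#   )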
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> int: '''simple docstring''' _A = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _A = "" else: _A = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[ : config.hidden_size, : ] _A = in_proj_bias[: config.hidden_size] _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = in_proj_bias[-config.hidden_size :] def __lowercase ( __lowercase ) -> List[str]: '''simple docstring''' _A = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple: '''simple docstring''' _A = dct.pop(__lowercase ) _A = val def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' _A = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=__lowercase , ) _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 ) _A = False # load original model from timm _A = timm.create_model(__lowercase , pretrained=__lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _A = timm_model.state_dict() if base_model: remove_classification_head_(__lowercase ) _A = create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase , __lowercase ) _A = "huggingface/label-files" _A = "imagenet-1k-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _A = ViTHybridModel(__lowercase ).eval() else: _A = ViTHybridForImageClassification(__lowercase ).eval() model.load_state_dict(__lowercase ) # create image processor _A = create_transform(**resolve_data_config({} , model=__lowercase ) ) _A = transform.transforms _A = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _A = ViTHybridImageProcessor( do_resize=__lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _A = prepare_img() _A = transform(__lowercase ).unsqueeze(0 ) _A = processor(__lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__lowercase , __lowercase ) # verify logits with torch.no_grad(): _A = model(__lowercase ) _A = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _A = timm_model.forward_features(__lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowercase , outputs.pooler_output , 
atol=1e-3 ) else: _A = timm_model(__lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__lowercase ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
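# Illustrative launch command for the conversion script above (the file name
# and output path are assumptions):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base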
'''simple docstring'''
# limitations under the License.

# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput  # noqa: F401
from .utils import deprecate


deprecate(
    '''pipelines_utils''',
    '''0.22.0''',
    '''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
    standard_warn=False,
    stacklevel=3,
)
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
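The module above relies on transformers' `_LazyModule` so that importing the package does not pull in torch until a symbol is actually used. As a rough, self-contained sketch of the idea (not transformers' actual implementation; `LazyModule` and `_attr_to_module` are illustrative names), the pattern can be built on `importlib`:

import importlib
import types


class LazyModule(types.ModuleType):
    '''Minimal sketch: resolve attributes from submodules on first access.'''

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value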
'''simple docstring'''
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    '''simple docstring'''
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
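A quick sanity check for the validity test above: the longest side must be strictly shorter than the sum of the remaining sides. These calls are illustrative, not part of the original file:

assert check_polygon([6, 10, 5]) is True       # 10 < 6 + 5
assert check_polygon([3, 7, 13, 2]) is False   # 13 >= 3 + 7 + 2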
'''simple docstring'''
import comet  # From: unbabel-comet
import torch

import datasets


lowerCamelCase_ = datasets.logging.get_logger(__name__)

lowerCamelCase_ = '''\
@inproceedings{rei-EtAl:2020:WMT,
  author    = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
  title     = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation},
  month     = {November},
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
  title = "{COMET}: A Neural Framework for {MT} Evaluation",
  author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
  booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
  month = nov,
  year = "2020",
  address = "Online",
  publisher = "Association for Computational Linguistics",
  url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
  pages = "2685--2702",
}
'''

lowerCamelCase_ = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train
Machine Translation metrics that achieve high levels of correlation with different types of human judgments
(HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that
were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year\'s competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''

lowerCamelCase_ = '''
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "sources": datasets.Value("string" , id="sequence" ), "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[ "https://github.com/Unbabel/COMET", "https://www.aclweb.org/anthology/2020.emnlp-main.213/", "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6", ] , ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ): '''simple docstring''' if self.config_name == "default": _A = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) ) else: _A = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=False ): '''simple docstring''' if gpus is None: _A = 1 if torch.cuda.is_available() else 0 _A = {"src": sources, "mt": predictions, "ref": references} _A = [dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) for t in zip(*data.values() )] _A , _A = self.scorer.predict(__UpperCAmelCase , gpus=__UpperCAmelCase , progress_bar=__UpperCAmelCase ) return {"mean_score": mean_score, "scores": scores}
'''simple docstring''' import os import pytest from transformers.dynamic_module_utils import get_imports lowerCamelCase_ = ''' import os ''' lowerCamelCase_ = ''' def foo(): import os return False ''' lowerCamelCase_ = ''' def foo(): def bar(): if True: import os return False return bar() ''' lowerCamelCase_ = ''' import os try: import bar except ImportError: raise ValueError() ''' lowerCamelCase_ = ''' import os def foo(): try: import bar except ImportError: raise ValueError() ''' lowerCamelCase_ = ''' import os try: import bar except (ImportError, AttributeError): raise ValueError() ''' lowerCamelCase_ = ''' import os try: import bar except ImportError as e: raise ValueError() ''' lowerCamelCase_ = ''' import os try: import bar except: raise ValueError() ''' lowerCamelCase_ = ''' import os try: import bar import baz except ImportError: raise ValueError() ''' lowerCamelCase_ = ''' import os try: import bar import baz except ImportError: x = 1 raise ValueError() ''' lowerCamelCase_ = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize("case" , __lowercase ) def __lowercase ( __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A = os.path.join(__lowercase , "test_file.py" ) with open(__lowercase , "w" ) as _tmp_file: _tmp_file.write(__lowercase ) _A = get_imports(__lowercase ) assert parsed_imports == ["os"]
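For intuition, `get_imports` reports only the top-level modules a file needs at import time; as the parametrized cases above show, modules guarded by `try`/`except ImportError` are deliberately excluded. A hedged sketch of calling it directly, outside pytest (same `get_imports` API as the test uses):

import os
import tempfile

from transformers.dynamic_module_utils import get_imports

snippet = "import os\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

with tempfile.TemporaryDirectory() as tmp_dir:
    file_path = os.path.join(tmp_dir, "test_file.py")
    with open(file_path, "w") as f:
        f.write(snippet)
    print(get_imports(file_path))  # expected, per the parametrized test above: ['os']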
'''simple docstring'''
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    '''simple docstring'''
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
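Slowsort sorts in place and is intentionally pessimal (a "multiply and surrender" algorithm), so it is only suitable for tiny inputs. An illustrative check:

seq = [5, 2, 9, 1, 5, 6]
slowsort(seq)
assert seq == [1, 2, 5, 5, 6, 9]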
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''', '''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''', } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''markuplm''' def __init__( self : List[str] , __UpperCAmelCase : Optional[Any]=30522 , __UpperCAmelCase : str=768 , __UpperCAmelCase : Optional[int]=12 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : str=1E-12 , __UpperCAmelCase : str=0 , __UpperCAmelCase : int=0 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Optional[int]=256 , __UpperCAmelCase : Any=1024 , __UpperCAmelCase : Union[str, Any]=216 , __UpperCAmelCase : Any=1001 , __UpperCAmelCase : int=32 , __UpperCAmelCase : List[str]=50 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : str=True , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Optional[Any] , ): '''simple docstring''' super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache _A = classifier_dropout # additional properties _A = max_depth _A = max_xpath_tag_unit_embeddings _A = max_xpath_subs_unit_embeddings _A = tag_pad_id _A = subs_pad_id _A = xpath_unit_hidden_size
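Like any `PretrainedConfig` subclass, the configuration above can be instantiated with defaults or per-field overrides. A brief sketch using the upstream `MarkupLMConfig` name (the class name in this dump is anonymized):

from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=64)  # override one of the XPath-specific fields
print(config.hidden_size)  # 768 by default
print(config.max_depth)    # 64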
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. 
I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase_ = { '''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FalconForCausalLM''', '''FalconModel''', '''FalconPreTrainedModel''', '''FalconForSequenceClassification''', '''FalconForTokenClassification''', '''FalconForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ): '''simple docstring''' _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = "gelu" _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = TFRoFormerModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = True _A = TFRoFormerForCausalLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = self.num_choices _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple 
docstring''' _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) 
self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(__UpperCAmelCase )[0] # TODO Replace vocab size _A = 50000 _A = [1, 6, vocab_size] self.assertEqual(output.shape , __UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. _A = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = tf.constant([[4, 10]] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _A = emba(input_ids.shape ) _A = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) _A = emba.weight[:3, :5] tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : str ): '''simple docstring''' _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _A = embed_positions([2, 16, 768] )[None, None, :, :] _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _A = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
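The final test above exercises the rotary position embedding (RoPE) identity: pairs of query/key dimensions are rotated by position-dependent sinusoids, so relative offsets show up directly in the attention dot product. A simplified NumPy illustration of one common pairing convention (not the exact memory layout used inside TFRoFormerSelfAttention):

import numpy as np


def apply_rotary(x: np.ndarray, sin: np.ndarray, cos: np.ndarray) -> np.ndarray:
    # rotate each adjacent (even, odd) pair: (x0, x1) -> (x0*cos - x1*sin, x1*cos + x0*sin)
    # sin/cos carry half the feature dimension; at position 0 (sin=0, cos=1) this is the identity
    out = np.empty_like(x)
    out[..., 0::2] = x[..., 0::2] * cos - x[..., 1::2] * sin
    out[..., 1::2] = x[..., 1::2] * cos + x[..., 0::2] * sin
    return out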
'''simple docstring'''
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    '''simple docstring'''
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
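Note that the two-pointer walk above is only correct when `nums` is already sorted in ascending order, as in the `[2, 7, 11, 15]` demo; for unsorted input you would sort first and map indices back. Illustrative calls:

assert two_pointer([2, 7, 11, 15], 9) == [0, 1]  # 2 + 7 == 9
assert two_pointer([2, 7, 11, 15], 100) == []    # no pair reaches 100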
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class _UpperCAmelCase ( PretrainedConfig ):
    """simple docstring"""

    snake_case = '''gpt_neox'''

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'''got {self.rope_scaling}'''
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}'''
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''')
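A brief sketch of how the `rope_scaling` validation behaves, using the upstream `GPTNeoXConfig` name (the class above is anonymized in this dump): a well-formed dict passes, while a factor <= 1 raises at construction time.

from transformers import GPTNeoXConfig

ok = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation

try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # factor must be > 1
except ValueError as err:
    print(err)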
'''simple docstring''' from ...processing_utils import ProcessorMixin class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''SpeechT5FeatureExtractor''' snake_case = '''SpeechT5Tokenizer''' def __init__( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int ): '''simple docstring''' super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self : Dict , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : str ): '''simple docstring''' _A = kwargs.pop("audio" , __UpperCAmelCase ) _A = kwargs.pop("text" , __UpperCAmelCase ) _A = kwargs.pop("text_target" , __UpperCAmelCase ) _A = kwargs.pop("audio_target" , __UpperCAmelCase ) _A = kwargs.pop("sampling_rate" , __UpperCAmelCase ) if audio is not None and text is not None: raise ValueError( "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" ) if audio_target is not None and text_target is not None: raise ValueError( "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." ) if audio is not None: _A = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase ) elif text is not None: _A = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase ) else: _A = None if audio_target is not None: _A = self.feature_extractor(audio_target=__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase ) _A = targets["input_values"] elif text_target is not None: _A = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase ) _A = targets["input_ids"] else: _A = None if inputs is None: return targets if targets is not None: _A = labels _A = targets.get("attention_mask" ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase ( self : int , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : List[Any] ): '''simple docstring''' _A = kwargs.pop("input_values" , __UpperCAmelCase ) _A = kwargs.pop("input_ids" , __UpperCAmelCase ) _A = kwargs.pop("labels" , __UpperCAmelCase ) if input_values is not None and input_ids is not None: raise ValueError("Cannot process both `input_values` and `input_ids` inputs." ) if input_values is None and input_ids is None and labels is None: raise ValueError( "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." 
) if input_values is not None: _A = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) elif input_ids is not None: _A = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase ) else: _A = None if labels is not None: if "input_ids" in labels or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and "input_ids" in labels[0]): _A = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase ) _A = targets["input_ids"] else: _A = self.feature_extractor.feature_size _A = self.feature_extractor.num_mel_bins _A = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) _A = feature_size_hack _A = targets["input_values"] else: _A = None if inputs is None: return targets if targets is not None: _A = labels _A = targets.get("attention_mask" ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase ( self : Dict , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : Dict , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : str ): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
'''simple docstring'''
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
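Numerically, at level=170 the factor is 259*425 / (255*89) ≈ 4.85, so 128 + factor*(c - 128) pushes channel values away from mid-gray. A PIL-free sketch of the same point operation (explicit clamping added here; PIL clips 8-bit values itself):

def contrast_value(c: int, level: int) -> int:
    # same formula as change_contrast, applied to a single 8-bit channel value
    factor = (259 * (level + 255)) / (255 * (259 - level))
    return max(0, min(255, int(128 + factor * (c - 128))))


print(contrast_value(100, 170))  # 0: below mid-gray, pushed to black
print(contrast_value(150, 170))  # 234: above mid-gray, pushed toward white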
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> int: '''simple docstring''' _A = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _A = "" else: _A = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[ : config.hidden_size, : ] _A = in_proj_bias[: config.hidden_size] _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = in_proj_bias[-config.hidden_size :] def __lowercase ( __lowercase ) -> List[str]: '''simple docstring''' _A = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple: '''simple docstring''' _A = dct.pop(__lowercase ) _A = val def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' _A = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=__lowercase , ) _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 ) _A = False # load original model from timm _A = timm.create_model(__lowercase , pretrained=__lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _A = timm_model.state_dict() if base_model: remove_classification_head_(__lowercase ) _A = create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase , __lowercase ) _A = "huggingface/label-files" _A = "imagenet-1k-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _A = ViTHybridModel(__lowercase ).eval() else: _A = ViTHybridForImageClassification(__lowercase ).eval() model.load_state_dict(__lowercase ) # create image processor _A = create_transform(**resolve_data_config({} , model=__lowercase ) ) _A = transform.transforms _A = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _A = ViTHybridImageProcessor( do_resize=__lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _A = prepare_img() _A = transform(__lowercase ).unsqueeze(0 ) _A = processor(__lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__lowercase , __lowercase ) # verify logits with torch.no_grad(): _A = model(__lowercase ) _A = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _A = timm_model.forward_features(__lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowercase , outputs.pooler_output , 
atol=1e-3 ) else: _A = timm_model(__lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__lowercase ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
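The subtlest step in the conversion above is `read_in_q_k_v`: timm stores attention as one fused `qkv` projection of shape (3*hidden, hidden), which has to be sliced into separate query/key/value weights for the HF checkpoint. A toy sketch of that split, with illustrative names:

import torch

hidden = 4
qkv_weight = torch.randn(3 * hidden, hidden)  # fused timm layout: [q; k; v]
qkv_bias = torch.randn(3 * hidden)

q_w, k_w, v_w = qkv_weight[:hidden], qkv_weight[hidden : 2 * hidden], qkv_weight[-hidden:]
q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : 2 * hidden], qkv_bias[-hidden:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)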
'''simple docstring'''
def sylvester(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int), F'''The input value of [n={number}] is not an integer'''

    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(F'''The input value of [n={number}] has to be > 0''')
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
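The recurrence implemented above is s(1) = 2 and s(n) = s(n-1) * (s(n-1) - 1) + 1, which grows doubly exponentially. A quick check of the first terms:

print([sylvester(n) for n in range(1, 6)])  # [2, 3, 7, 43, 1807]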
'''simple docstring'''
from math import ceil, sqrt


def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    answer = 0

    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(F"""{solution() = }""")
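The loop above counts square "laminae" (Project Euler 173): an outer width n with a hole of width h of the same parity (h >= 1, n - h >= 2) uses n**2 - h**2 tiles. A brute-force cross-check for small limits, assuming that reading of the problem:

def brute_force(limit: int) -> int:
    count = 0
    n = 3
    while 4 * n - 4 <= limit:  # thinnest lamina of width n uses 4n - 4 tiles
        for hole in range(n - 2, 0, -2):  # hole width shares parity with n
            if n * n - hole * hole <= limit:
                count += 1
        n += 1
    return count


assert brute_force(100) == solution(100) == 41  # Project Euler quotes 41 laminae for 100 tiles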
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCamelCase_ = logging.getLogger(__name__) def __lowercase ( __lowercase , __lowercase ) -> Optional[int]: '''simple docstring''' if os.path.exists(__lowercase ): if os.path.exists(os.path.join(__lowercase , "config.json" ) ) and os.path.isfile( os.path.join(__lowercase , "config.json" ) ): os.remove(os.path.join(__lowercase , "config.json" ) ) if os.path.exists(os.path.join(__lowercase , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(__lowercase , "pytorch_model.bin" ) ): os.remove(os.path.join(__lowercase , "pytorch_model.bin" ) ) else: os.makedirs(__lowercase ) model.save_pretrained(__lowercase ) def __lowercase ( __lowercase , __lowercase=False ) -> Optional[int]: '''simple docstring''' _A = 2 if unlogit: _A = torch.pow(__lowercase , __lowercase ) _A = p * torch.log(__lowercase ) _A = 0 return -plogp.sum(dim=-1 ) def __lowercase ( __lowercase ) -> Optional[Any]: '''simple docstring''' logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(__lowercase ) ) ) ) for row in range(len(__lowercase ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase=False ) -> int: '''simple docstring''' _A , _A = model.config.num_hidden_layers, model.config.num_attention_heads _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) if head_mask is None: _A = torch.ones(__lowercase , __lowercase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowercase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _A = None _A = 0.0 _A = 0.0 for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): _A = tuple(t.to(args.device ) for t in inputs ) ((_A) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _A = model(__lowercase , labels=__lowercase , head_mask=__lowercase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _A , _A , _A = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowercase ): _A = entropy(attn.detach() , __lowercase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _A = 2 _A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: _A = (head_importance - head_importance.min()) 
/ (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(__lowercase ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(__lowercase ) logger.info("Head ranked by importance scores" ) _A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _A = torch.arange( head_importance.numel() , device=args.device ) _A = head_ranks.view_as(__lowercase ) print_ad_tensor(__lowercase ) return attn_entropy, head_importance, total_loss def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase ) _A = 1 / loss # instead of a downstream score, use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold ) _A = torch.ones_like(__lowercase ) _A = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _A = original_score while current_score >= original_score * args.masking_threshold: _A = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _A = float("Inf" ) _A = head_importance.view(-1 ).sort()[1] if len(__lowercase ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads _A = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) _A = new_head_mask.view(-1 ) _A = 0.0 _A = new_head_mask.view_as(__lowercase ) _A = new_head_mask.clone().detach() print_ad_tensor(__lowercase ) # Compute metric and head importance again _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase ) _A = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percent)" , __lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("Final head mask" ) print_ad_tensor(__lowercase ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase ) _A = 1 / loss _A = datetime.now() - before_time _A = sum(p.numel() for p in model.parameters() ) _A = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowercase , __lowercase ): _A = [ v, ] assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowercase ) _A = sum(p.numel() for p in model.parameters() ) _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase , actually_pruned=__lowercase , ) _A = 1 / loss _A = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)" , __lowercase , __lowercase , pruned_num_params / original_num_params * 100 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase ) logger.info("Pruning: speed
ratio (original timing / new timing): %f percent" , original_time / new_time * 100 ) save_model(__lowercase , args.output_dir ) def __lowercase ( ) -> Union[str, Any]: '''simple docstring''' _A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , ) parser.add_argument( "--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , ) # Other parameters parser.add_argument( "--config_name" , default="" , type=__lowercase , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=__lowercase , type=__lowercase , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." ) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in the output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask heads until a threshold of accuracy is reached." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=__lowercase , help="Masking threshold in terms of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=__lowercase , help="Amount of heads to mask at each masking step." ) parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=128 , type=__lowercase , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." ) , ) parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." ) parser.add_argument("--seed" , type=__lowercase , default=42 ) parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Avoid using CUDA even when it is available" ) parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging."
) _A = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) _A = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _A = torch.device("cuda" , args.local_rank ) _A = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _A = nn.parallel.DistributedDataParallel( __lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowercase ) elif args.n_gpu > 1: _A = nn.DataParallel(__lowercase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowercase ) torch.save(__lowercase , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , __lowercase ) # Prepare dataset _A = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _A = (torch.from_numpy(__lowercase ),) _A = TensorDataset(*__lowercase ) _A = RandomSampler(__lowercase ) _A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowercase , __lowercase , __lowercase ) # Try head masking (set heads to zero until the score goes under a threshold) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _A = mask_heads(__lowercase , __lowercase , __lowercase ) prune_heads(__lowercase , __lowercase , __lowercase , __lowercase ) if __name__ == "__main__": main()
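# Illustrative sketch (standalone, not part of the original script): how the masked-entropy
# helper above behaves on a toy attention distribution. The tensor values are assumptions
# chosen only for demonstration.
import torch

def toy_entropy(p):
    # mirrors the entropy helper above: plogp = p * log(p), zeroed where p == 0,
    # then the negative sum over the last dimension
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)

attn = torch.tensor([[0.5, 0.5], [1.0, 0.0]])
print(toy_entropy(attn))  # ~[0.6931, 0]: the uniform row has maximal entropy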
'''simple docstring''' import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets lowerCamelCase_ = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' lowerCamelCase_ = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists needs to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' lowerCamelCase_ = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, removes punctuation from the predictions and references before scoring. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ...
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase ( self : Any ): '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , ): '''simple docstring''' _A = len(references[0] ) if any(len(__UpperCAmelCase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) _A = [[refs[i] for refs in references] for i in range(__UpperCAmelCase )] _A = TER( normalized=__UpperCAmelCase , no_punct=__UpperCAmelCase , asian_support=__UpperCAmelCase , case_sensitive=__UpperCAmelCase , ) _A = sb_ter.corpus_score(__UpperCAmelCase , __UpperCAmelCase ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
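# Illustrative sketch (standalone): the reference transposition performed in the compute
# method above -- this metric takes one list of references per prediction, while sacrebleu's
# corpus_score expects one stream per reference position. The example strings are assumptions.
predictions = ["hello there", "general kenobi"]
references = [["hello there", "hi there"], ["general kenobi", "general kenobi"]]
references_per_prediction = len(references[0])
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
print(transformed_references)
# [['hello there', 'general kenobi'], ['hi there', 'general kenobi']]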
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = CycleDiffusionPipeline snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A = CLIPTextModel(__UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ): '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _A = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith("mps" ): _A = torch.manual_seed(__UpperCAmelCase ) else: _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _A = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = 
self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.get_dummy_components() for name, module in components.items(): if hasattr(__UpperCAmelCase , "half" ): _A = module.half() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Any ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained( __UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) _A = init_image.resize((512, 512) ) _A 
= "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring''' from __future__ import annotations lowerCamelCase_ = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> tuple[list[list[int]], list[list[int]]]: '''simple docstring''' _A = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__lowercase ) ) ] # the reference grid _A = 1 _A = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__lowercase ) ) ] # the action grid _A = init[0] _A = init[1] _A = 0 _A = g + heuristic[x][y] # estimated total cost from the start to the goal through this cell _A = [[f, g, x, y]] _A = False # flag that is set when search is complete _A = False # flag set if we can't expand the search any further while not found and not resign: if len(__lowercase ) == 0: raise ValueError("Algorithm is unable to find solution" ) else: # choose the least costly action so as to move closer to the goal cell.sort() cell.reverse() _A = cell.pop() _A = next_cell[2] _A = next_cell[3] _A = next_cell[1] if x == goal[0] and y == goal[1]: _A = True else: for i in range(len(__lowercase ) ): # to try out different valid actions _A = x + DIRECTIONS[i][0] _A = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__lowercase ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: _A = g + cost _A = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) _A = 1 _A = i _A = [] _A = goal[0] _A = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: _A = x - DIRECTIONS[action[x][y]][0] _A = y - DIRECTIONS[action[x][y]][1] _A = xa _A = ya invpath.append([x, y] ) _A = [] for i in range(len(__lowercase ) ): path.append(invpath[len(__lowercase ) - 1 - i] ) return path, action if __name__ == "__main__": lowerCamelCase_ = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] lowerCamelCase_ = [0, 0] # all coordinates are given in format [y,x] lowerCamelCase_ = [len(grid) - 1, len(grid[0]) - 1] lowerCamelCase_ = 1 # the cost map which pushes the path closer to the goal lowerCamelCase_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): lowerCamelCase_ = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map lowerCamelCase_ = 99 lowerCamelCase_ , lowerCamelCase_ = search(grid, init, goal, cost, heuristic) print('''ACTION MAP''') for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
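# Illustrative sketch (standalone): the admissible Manhattan-distance heuristic built in
# the __main__ block above, shown on a tiny 2x3 grid (the grid size is an assumption).
goal = [1, 2]
heuristic = [[abs(i - goal[0]) + abs(j - goal[1]) for j in range(3)] for i in range(2)]
print(heuristic)  # [[3, 2, 1], [2, 1, 0]] -- values shrink toward the goal cell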
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
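# Illustrative sketch (standalone, not transformers-specific): the idea behind the
# _LazyModule registration above -- heavy submodules are imported only on first attribute
# access. The wrapped module name ("json") is an assumption chosen for the demo.
import importlib

class LazyAttr:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        # only reached for attributes not already set on the instance
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)

lazy_json = LazyAttr("json")
print(lazy_json.dumps({"lazy": True}))  # the underlying import happens on this line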
'''simple docstring''' import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowerCamelCase_ = logging.get_logger(__name__) class _UpperCAmelCase : """simple docstring""" def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = question_encoder _A = generator _A = self.question_encoder def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' if os.path.isfile(__UpperCAmelCase ): raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' ) os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase ) _A = os.path.join(__UpperCAmelCase , "question_encoder_tokenizer" ) _A = os.path.join(__UpperCAmelCase , "generator_tokenizer" ) self.question_encoder.save_pretrained(__UpperCAmelCase ) self.generator.save_pretrained(__UpperCAmelCase ) @classmethod def lowerCAmelCase ( cls : Tuple , __UpperCAmelCase : Any , **__UpperCAmelCase : List[str] ): '''simple docstring''' from ..auto.tokenization_auto import AutoTokenizer _A = kwargs.pop("config" , __UpperCAmelCase ) if config is None: _A = RagConfig.from_pretrained(__UpperCAmelCase ) _A = AutoTokenizer.from_pretrained( __UpperCAmelCase , config=config.question_encoder , subfolder="question_encoder_tokenizer" ) _A = AutoTokenizer.from_pretrained( __UpperCAmelCase , config=config.generator , subfolder="generator_tokenizer" ) return cls(question_encoder=__UpperCAmelCase , generator=__UpperCAmelCase ) def __call__( self : int , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' return self.current_tokenizer(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , *__UpperCAmelCase : str , **__UpperCAmelCase : Dict ): '''simple docstring''' return self.generator.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : Any , *__UpperCAmelCase : str , **__UpperCAmelCase : Dict ): '''simple docstring''' return self.generator.decode(*__UpperCAmelCase , **__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.question_encoder def lowerCAmelCase ( self : int ): '''simple docstring''' _A = self.generator def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : str = "longest" , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Optional[Any] , ): '''simple docstring''' warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. 
See the documentation of your specific tokenizer for more " "details" , __UpperCAmelCase , ) if max_length is None: _A = self.current_tokenizer.model_max_length _A = self( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , max_length=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , **__UpperCAmelCase , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: _A = self.current_tokenizer.model_max_length _A = self( text_target=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , **__UpperCAmelCase , ) _A = labels["input_ids"] return model_inputs
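# Illustrative sketch (standalone): the "current tokenizer" delegation pattern used by the
# class above, reduced to plain callables. The callables here are demo assumptions, not
# real tokenizers.
class CompositeTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder  # question encoder is active by default

    def __call__(self, text):
        return self.current_tokenizer(text)

composite = CompositeTokenizer(str.upper, str.lower)
print(composite("Hamlet"))  # HAMLET
composite.current_tokenizer = composite.generator  # mirrors the generator-switching method above
print(composite("Hamlet"))  # hamlet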
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class _UpperCAmelCase : """simple docstring""" snake_case = '''dummy_data''' snake_case = '''datasets''' snake_case = False def __init__( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Union[Version, str] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[List[Callable]] = None , ): '''simple docstring''' _A = 0 _A = dataset_name _A = cache_dir _A = use_local_dummy_data _A = config # download_callbacks take a single url as input _A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _A = str(__UpperCAmelCase ) # to be downloaded _A = None _A = None @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self._dummy_file is None: _A = self.download_dummy_data() return self._dummy_file @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _A = cached_path( __UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase ) return os.path.join(__UpperCAmelCase , self.dummy_file_name ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' if self._bucket_url is None: _A = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def lowerCAmelCase ( self : str ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] , *__UpperCAmelCase : Dict ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _A = self.dummy_file_name # special case when data_url is a dict if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , (list, tuple) ): return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase ) else: return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : Any ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : List[str] ): '''simple docstring''' return path def lowerCAmelCase ( self : str ): '''simple docstring''' return {} def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): for single_url in single_urls: download_callback(__UpperCAmelCase ) else: _A = single_urls download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls] else: _A = single_urls _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) _A = value # make sure that values are unique if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __UpperCAmelCase ) ) for url in data_url ) _A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _A = [data_url[0]] * len(__UpperCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__UpperCAmelCase ) return dummy_data_list def 
lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : Any , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' def _iter_archive_members(__UpperCAmelCase : List[Any] ): # this preserves the order of the members inside the ZIP archive _A = Path(self.dummy_file ).parent _A = path.relative_to(__UpperCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__UpperCAmelCase ) _A = Path(__UpperCAmelCase ) _A = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("rb" ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [paths] for path in paths: if os.path.isfile(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__UpperCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
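# Illustrative sketch (standalone): how the dummy-data helpers above map a remote URL to a
# local file name via urllib.parse.quote_plus. The URL and folder are demo assumptions;
# the printed path assumes a POSIX os.sep.
import os
import urllib.parse

path_to_dummy_data = "dummy/1.0.0/dummy_data"
single_url = "https://example.com/files/train.csv?version=2"
value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
print(value)  # dummy/1.0.0/dummy_data/train.csv%3Fversion%3D2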
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> List[str]: '''simple docstring''' _A = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("head" ): _A = "segformer.encoder." + key if key.startswith("backbone" ): _A = key.replace("backbone" , "segformer.encoder" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _A = key[key.find("patch_embed" ) + len("patch_embed" )] _A = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(__lowercase )-1}''' ) if "norm" in key: _A = key.replace("norm" , "layer_norm" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _A = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )] _A = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(__lowercase )-1}''' ) if "layer_norm1" in key: _A = key.replace("layer_norm1" , "layer_norm_1" ) if "layer_norm2" in key: _A = key.replace("layer_norm2" , "layer_norm_2" ) if "block" in key: # replace for example block1 by block.0 _A = key[key.find("block" ) + len("block" )] _A = key.replace(F'''block{idx}''' , F'''block.{int(__lowercase )-1}''' ) if "attn.q" in key: _A = key.replace("attn.q" , "attention.self.query" ) if "attn.proj" in key: _A = key.replace("attn.proj" , "attention.output.dense" ) if "attn" in key: _A = key.replace("attn" , "attention.self" ) if "fc1" in key: _A = key.replace("fc1" , "dense1" ) if "fc2" in key: _A = key.replace("fc2" , "dense2" ) if "linear_pred" in key: _A = key.replace("linear_pred" , "classifier" ) if "linear_fuse" in key: _A = key.replace("linear_fuse.conv" , "linear_fuse" ) _A = key.replace("linear_fuse.bn" , "batch_norm" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _A = key[key.find("linear_c" ) + len("linear_c" )] _A = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(__lowercase )-1}''' ) if key.startswith("head" ): _A = key.replace("head" , "classifier" ) _A = value return new_state_dict def __lowercase ( __lowercase , __lowercase ) -> Optional[Any]: '''simple docstring''' for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _A = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) _A = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict _A = kv_weight[ : config.hidden_sizes[i], : ] _A = kv_bias[: config.hidden_sizes[i]] _A = kv_weight[ config.hidden_sizes[i] :, : ] _A = kv_bias[ config.hidden_sizes[i] : ] def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return image @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase ) -> str: '''simple docstring''' _A = SegformerConfig() _A = False # set attributes based on model_name _A = 
"huggingface/label-files" if "segformer" in model_name: _A = model_name[len("segformer." ) : len("segformer." ) + 2] if "ade" in model_name: _A = 150 _A = "ade20k-id2label.json" _A = (1, 150, 128, 128) elif "city" in model_name: _A = 19 _A = "cityscapes-id2label.json" _A = (1, 19, 128, 128) else: raise ValueError(F'''Model {model_name} not supported''' ) elif "mit" in model_name: _A = True _A = model_name[4:6] _A = 1000 _A = "imagenet-1k-id2label.json" _A = (1, 1000) else: raise ValueError(F'''Model {model_name} not supported''' ) # set config attributes _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _A = [64, 128, 320, 512] _A = 256 elif size == "b2": _A = [64, 128, 320, 512] _A = 768 _A = [3, 4, 6, 3] elif size == "b3": _A = [64, 128, 320, 512] _A = 768 _A = [3, 4, 18, 3] elif size == "b4": _A = [64, 128, 320, 512] _A = 768 _A = [3, 8, 27, 3] elif size == "b5": _A = [64, 128, 320, 512] _A = 768 _A = [3, 6, 40, 3] else: raise ValueError(F'''Size {size} not supported''' ) # load image processor (only resize + normalize) _A = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__lowercase , align=__lowercase , do_random_crop=__lowercase ) # prepare image _A = prepare_img() _A = image_processor(images=__lowercase , return_tensors="pt" ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict if encoder_only: _A = torch.load(__lowercase , map_location=torch.device("cpu" ) ) else: _A = torch.load(__lowercase , map_location=torch.device("cpu" ) )["state_dict"] # rename keys _A = rename_keys(__lowercase , encoder_only=__lowercase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(__lowercase , __lowercase ) # create HuggingFace model and load state dict if encoder_only: _A = False _A = SegformerForImageClassification(__lowercase ) else: _A = SegformerForSemanticSegmentation(__lowercase ) model.load_state_dict(__lowercase ) model.eval() # forward pass _A = model(__lowercase ) _A = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": _A = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _A = torch.tensor( [ [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _A = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _A = torch.tensor( [ [[-9.0878, -10.2081, -10.1891], 
[-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _A = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _A = torch.tensor( [ [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _A = torch.tensor( [ [[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _A = torch.tensor( [ [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]], [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _A = torch.tensor( [ [ [-1.1372e01, -1.2787e01, -1.3477e01], [-1.2536e01, -1.4194e01, -1.4409e01], [-1.3217e01, -1.4888e01, -1.5327e01], ], [ [-1.4791e01, -1.7122e01, -1.8277e01], [-1.7163e01, -1.9192e01, -1.9533e01], [-1.7897e01, -1.9991e01, -2.0315e01], ], [ [7.6723e-01, 4.1921e-01, -7.7878e-02], [4.7772e-01, 9.5557e-03, -2.8082e-01], [3.6032e-01, -2.4826e-01, -5.1168e-01], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _A = torch.tensor( [ [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _A = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _A = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _A = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], 
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": _A = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _A = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]], ] ) else: _A = logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1e-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) image_processor.save_pretrained(__lowercase ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''segformer.b0.512x512.ade.160k''', type=str, help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) lowerCamelCase_ = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
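# Illustrative sketch (standalone): the 1-indexed-to-0-indexed shift that the key-renaming
# function above applies to original checkpoint keys. The example key is an assumption.
key = "patch_embed1.proj.weight"
idx = key[key.find("patch_embed") + len("patch_embed")]
new_key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
print(new_key)  # patch_embeddings.0.proj.weight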
'''simple docstring''' def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Union[str, Any]: '''simple docstring''' if isinstance(__lowercase , __lowercase ) and isinstance(__lowercase , __lowercase ): _A = len(set_a.intersection(__lowercase ) ) if alternative_union: _A = len(__lowercase ) + len(__lowercase ) else: _A = len(set_a.union(__lowercase ) ) return intersection / union if isinstance(__lowercase , (list, tuple) ) and isinstance(__lowercase , (list, tuple) ): _A = [element for element in set_a if element in set_b] if alternative_union: _A = len(__lowercase ) + len(__lowercase ) return len(__lowercase ) / union else: _A = set_a + [element for element in set_b if element not in set_a] return len(__lowercase ) / len(__lowercase ) return None if __name__ == "__main__": lowerCamelCase_ = {'''a''', '''b''', '''c''', '''d''', '''e'''} lowerCamelCase_ = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''} print(jaccard_similarity(set_a, set_b))
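# Illustrative sketch (standalone): the two union conventions supported above, evaluated
# on the sets from the __main__ block (|A ∩ B| = 3, |A ∪ B| = 8, |A| + |B| = 11).
set_a = {"a", "b", "c", "d", "e"}
set_b = {"c", "d", "e", "f", "h", "i"}
print(len(set_a & set_b) / len(set_a | set_b))           # 0.375 (classic Jaccard index)
print(len(set_a & set_b) / (len(set_a) + len(set_b)))    # ~0.2727 (alternative_union=True)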
'''simple docstring''' from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=snake_case_ ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) snake_case = Features({'''text''': Value('''string''' )} ) snake_case = Features({'''summary''': Value('''string''' )} ) snake_case = "text" snake_case = "summary" @property def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return {self.text_column: "text", self.summary_column: "summary"}
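# Illustrative sketch (standalone): the column_mapping contract of the task template above,
# reproduced with a minimal frozen dataclass. The custom column names are demo assumptions.
from dataclasses import dataclass

@dataclass(frozen=True)
class DemoSummarization:
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self):
        return {self.text_column: "text", self.summary_column: "summary"}

print(DemoSummarization(text_column="article", summary_column="highlights").column_mapping)
# {'article': 'text', 'highlights': 'summary'}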
'''simple docstring'''

import inspect
import os
import unittest
from dataclasses import dataclass

import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    snake_case = 0
    snake_case = False
    snake_case = 3.0


class _UpperCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    def lowerCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        self.assertDictEqual(MockClass().to_kwargs() , {} )
        self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
        self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {"a": 2, "b": True} )
        self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"a": 2, "c": 2.25} )

    @require_cuda
    def lowerCAmelCase ( self : int ):
        '''simple docstring'''
        _A = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        _A = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fpaa )
        _A = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0 )
        self.assertEqual(scaler._growth_factor , 2.0 )

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5 )
        self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , __UpperCAmelCase )

    @require_multi_gpu
    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        _A = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )


if __name__ == "__main__":
    lowerCamelCase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    lowerCamelCase_ = Accelerator(kwargs_handlers=[ddp_scaler])
    lowerCamelCase_ = torch.nn.Linear(1_00, 2_00)
    lowerCamelCase_ = accelerator.prepare(model)

    # Check the values changed in kwargs
    lowerCamelCase_ = ''''''
    lowerCamelCase_ = model.bucket_bytes_cap // (10_24 * 10_24)
    if observed_bucket_cap_map != 15:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
'''simple docstring'''

from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax


if is_torch_available():
    import torch


lowerCamelCase_ = logging.get_logger(__name__)


@add_end_docstrings(
    snake_case_ , r'''
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first
            resulting token will be used (with a warning, and that might be slower).
    ''' , )
class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    def lowerCAmelCase ( self : Any , __UpperCAmelCase : GenericTensor ):
        '''simple docstring'''
        if self.framework == "tf":
            _A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            _A = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase )
        else:
            raise ValueError("Unsupported framework" )
        return masked_index

    def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : GenericTensor ):
        '''simple docstring'''
        _A = self.get_masked_index(__UpperCAmelCase )
        _A = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , f'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )

    def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : GenericTensor ):
        '''simple docstring'''
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(__UpperCAmelCase )

    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        if return_tensors is None:
            _A = self.framework
        _A = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase )
        self.ensure_exactly_one_mask_token(__UpperCAmelCase )
        return model_inputs

    def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        _A = self.model(**__UpperCAmelCase )
        _A = model_inputs["input_ids"]
        return model_outputs

    def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any=5 , __UpperCAmelCase : str=None ):
        '''simple docstring'''
        if target_ids is not None and target_ids.shape[0] < top_k:
            _A = target_ids.shape[0]
        _A = model_outputs["input_ids"][0]
        _A = model_outputs["logits"]

        if self.framework == "tf":
            _A = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]

            _A = outputs.numpy()

            _A = outputs[0, masked_index, :]
            _A = stable_softmax(__UpperCAmelCase , axis=-1 )
            if target_ids is not None:
                _A = tf.gather_nd(tf.squeeze(__UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
                _A = tf.expand_dims(__UpperCAmelCase , 0 )

            _A = tf.math.top_k(__UpperCAmelCase , k=__UpperCAmelCase )
            _A , _A = topk.values.numpy(), topk.indices.numpy()
        else:
            _A = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            _A = outputs[0, masked_index, :]
            _A = logits.softmax(dim=-1 )
            if target_ids is not None:
                _A = probs[..., target_ids]

            _A , _A = probs.topk(__UpperCAmelCase )

        _A = []
        _A = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            _A = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                _A = input_ids.numpy().copy()
                if target_ids is not None:
                    _A = target_ids[p].tolist()

                _A = p
                # Filter padding out:
                _A = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                _A = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
                _A = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
                row.append(__UpperCAmelCase )
            result.append(__UpperCAmelCase )

        if single_mask:
            return result[0]
        return result

    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict=None ):
        '''simple docstring'''
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            _A = [targets]
        try:
            _A = self.tokenizer.get_vocab()
        except Exception:
            _A = {}
        _A = []
        for target in targets:
            _A = vocab.get(__UpperCAmelCase , __UpperCAmelCase )
            if id_ is None:
                _A = self.tokenizer(
                    __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , max_length=1 , truncation=__UpperCAmelCase , )["input_ids"]
                if len(__UpperCAmelCase ) == 0:
                    logger.warning(
                        f'''The specified target token `{target}` does not exist in the model vocabulary. '''
                        "We cannot replace it with anything meaningful, ignoring it" )
                    continue
                _A = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f'''The specified target token `{target}` does not exist in the model vocabulary. '''
                    f'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
            target_ids.append(id_ )
        _A = list(set(__UpperCAmelCase ) )
        if len(__UpperCAmelCase ) == 0:
            raise ValueError("At least one target must be provided when passed." )
        _A = np.array(__UpperCAmelCase )
        return target_ids

    def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Optional[Any]=None ):
        '''simple docstring'''
        _A = {}

        if targets is not None:
            _A = self.get_target_ids(__UpperCAmelCase , __UpperCAmelCase )
            _A = target_ids

        if top_k is not None:
            _A = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
        return {}, {}, postprocess_params

    def __call__( self : List[Any] , __UpperCAmelCase : List[Any] , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : List[str] ):
        '''simple docstring'''
        _A = super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
            return outputs[0]
        return outputs
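# Hedged usage sketch of the fill-mask pipeline through the standard
# `transformers` factory; the checkpoint name is illustrative.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
for prediction in unmasker("The capital of France is <mask>.", top_k=3):
    print(prediction["token_str"], round(prediction["score"], 4))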
'''simple docstring'''


def __lowercase ( __lowercase = 100 ) -> int:
    '''simple docstring'''
    _A = n * (n + 1) * (2 * n + 1) / 6
    _A = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )


if __name__ == "__main__":
    print(F"""{solution() = }""")
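# Quick sanity check of the two closed forms above for the classic n = 10 case
# (`solution` matches the name used in the file's __main__ block):
# sum of squares = 385, square of sum = 55 ** 2 = 3025, difference = 2640.
print(solution(10))  # 2640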
'''simple docstring'''

import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def __lowercase ( __lowercase , __lowercase=None ) -> List[Any]:
    '''simple docstring'''
    _A = None
    if token is not None:
        _A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}

    _A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    _A = requests.get(__lowercase , headers=__lowercase ).json()
    _A = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
        _A = math.ceil((result["total_count"] - 100) / 100 )

        for i in range(__lowercase ):
            _A = requests.get(url + F'''&page={i + 2}''' , headers=__lowercase ).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )

        return job_links
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )

    return {}


def __lowercase ( __lowercase , __lowercase=None ) -> Union[str, Any]:
    '''simple docstring'''
    _A = None
    if token is not None:
        _A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}

    _A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
    _A = requests.get(__lowercase , headers=__lowercase ).json()
    _A = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
        _A = math.ceil((result["total_count"] - 100) / 100 )

        for i in range(__lowercase ):
            _A = requests.get(url + F'''&page={i + 2}''' , headers=__lowercase ).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )

        return artifacts
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )

    return {}


def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
    '''simple docstring'''
    _A = None
    if token is not None:
        _A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}

    _A = requests.get(__lowercase , headers=__lowercase , allow_redirects=__lowercase )
    _A = result.headers["Location"]
    _A = requests.get(__lowercase , allow_redirects=__lowercase )
    _A = os.path.join(__lowercase , F'''{artifact_name}.zip''' )
    with open(__lowercase , "wb" ) as fp:
        fp.write(response.content )


def __lowercase ( __lowercase , __lowercase=None ) -> Dict:
    '''simple docstring'''
    _A = []
    _A = []
    _A = None
    with zipfile.ZipFile(__lowercase ) as z:
        for filename in z.namelist():
            if not os.path.isdir(__lowercase ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(__lowercase ) as f:
                        for line in f:
                            _A = line.decode("UTF-8" ).strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    _A = line[: line.index(": " )]
                                    _A = line[line.index(": " ) + len(": " ) :]
                                    errors.append([error_line, error] )
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED " ):
                                # `test` is the test method that failed
                                _A = line[len("FAILED " ) :]
                                failed_tests.append(__lowercase )
                            elif filename == "job_name.txt":
                                _A = line

    if len(__lowercase ) != len(__lowercase ):
        raise ValueError(
            F'''`errors` and `failed_tests` should have the same number of elements. Got {len(__lowercase )} for `errors` '''
            F'''and {len(__lowercase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
            " problem." )

    _A = None
    if job_name and job_links:
        _A = job_links.get(__lowercase , __lowercase )

    # A list with elements of the form (line of error, error, failed test)
    _A = [x + [y] + [job_link] for x, y in zip(__lowercase , __lowercase )]

    return result


def __lowercase ( __lowercase , __lowercase=None ) -> List[Any]:
    '''simple docstring'''
    _A = []
    _A = [os.path.join(__lowercase , __lowercase ) for p in os.listdir(__lowercase ) if p.endswith(".zip" )]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(__lowercase , job_links=__lowercase ) )

    return errors


def __lowercase ( __lowercase , __lowercase=None ) -> List[Any]:
    '''simple docstring'''
    _A = Counter()
    counter.update([x[1] for x in logs] )
    _A = counter.most_common()
    _A = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            _A = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    _A = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) )
    return r


def __lowercase ( __lowercase ) -> List[str]:
    '''simple docstring'''
    _A = test.split("::" )[0]
    if test.startswith("tests/models/" ):
        _A = test.split("/" )[2]
    else:
        _A = None

    return test


def __lowercase ( __lowercase , __lowercase=None ) -> Dict:
    '''simple docstring'''
    _A = [(x[0], x[1], get_model(x[2] )) for x in logs]
    _A = [x for x in logs if x[2] is not None]
    _A = {x[2] for x in logs}

    _A = {}
    for test in tests:
        _A = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test] )
        _A = counter.most_common()
        _A = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        _A = sum(error_counts.values() )
        if n_errors > 0:
            _A = {"count": n_errors, "errors": error_counts}

    _A = dict(sorted(r.items() , key=lambda __lowercase : item[1]["count"] , reverse=__lowercase ) )
    return r


def __lowercase ( __lowercase ) -> Union[str, Any]:
    '''simple docstring'''
    _A = "| no. | error | status |"
    _A = "|-:|:-|:-|"
    _A = [header, sep]
    for error in reduced_by_error:
        _A = reduced_by_error[error]["count"]
        _A = F'''| {count} | {error[:100]} | |'''
        lines.append(__lowercase )

    return "\n".join(__lowercase )


def __lowercase ( __lowercase ) -> str:
    '''simple docstring'''
    _A = "| model | no. of errors | major error | count |"
    _A = "|-:|-:|-:|-:|"
    _A = [header, sep]
    for model in reduced_by_model:
        _A = reduced_by_model[model]["count"]
        _A , _A = list(reduced_by_model[model]["errors"].items() )[0]
        _A = F'''| {model} | {count} | {error[:60]} | {_count} |'''
        lines.append(__lowercase )

    return "\n".join(__lowercase )


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    parser.add_argument(
        '''--output_dir''',
        type=str,
        required=True,
        help='''Where to store the downloaded artifacts and other result files.''',
    )
    parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
    lowerCamelCase_ = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    lowerCamelCase_ = get_job_links(args.workflow_run_id, token=args.token)
    lowerCamelCase_ = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                lowerCamelCase_ = k.find(''' / ''')
                lowerCamelCase_ = k[index + len(''' / ''') :]
            lowerCamelCase_ = v
    with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    lowerCamelCase_ = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    lowerCamelCase_ = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    lowerCamelCase_ = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    lowerCamelCase_ = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    lowerCamelCase_ = reduce_by_error(errors)
    lowerCamelCase_ = reduce_by_model(errors)

    lowerCamelCase_ = make_github_table(reduced_by_error)
    lowerCamelCase_ = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
        fp.write(sa)
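# Example invocation (the script name, run id, directory and token are illustrative):
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ./ci_errors \
#       --token "$GITHUB_TOKEN"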
'''simple docstring'''

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForMaskedImageModeling,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


lowerCamelCase_ = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')

lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class _UpperCAmelCase :
    """simple docstring"""

    snake_case = field(
        default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    snake_case = field(
        default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    snake_case = field(
        default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , )
    snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} )
    snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} )
    snake_case = field(
        default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} )
    snake_case = field(
        default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , )
    snake_case = field(
        default=snake_case_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    snake_case = field(
        default=snake_case_ , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        _A = {}
        if self.train_dir is not None:
            _A = self.train_dir
        if self.validation_dir is not None:
            _A = self.validation_dir
        _A = data_files if data_files else None


@dataclass
class _UpperCAmelCase :
    """simple docstring"""

    snake_case = field(
        default=snake_case_ , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '''
                '''checkpoint identifier on the hub. '''
                '''Don\'t set if you want to train a model from scratch.'''
            )
        } , )
    snake_case = field(
        default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , )
    snake_case = field(
        default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    snake_case = field(
        default=snake_case_ , metadata={
            '''help''': (
                '''Override some existing default config settings when a model is trained from scratch. Example: '''
                '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
            )
        } , )
    snake_case = field(
        default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , )
    snake_case = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    snake_case = field(
        default=snake_case_ , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    snake_case = field(
        default=snake_case_ , metadata={
            '''help''': (
                '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'''
            )
        } , )
    snake_case = field(
        default=snake_case_ , metadata={
            '''help''': (
                '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'''
            )
        } , )
    snake_case = field(
        default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , )


class _UpperCAmelCase :
    """simple docstring"""

    def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ):
        '''simple docstring'''
        _A = input_size
        _A = mask_patch_size
        _A = model_patch_size
        _A = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size" )

        _A = self.input_size // self.mask_patch_size
        _A = self.mask_patch_size // self.model_patch_size

        _A = self.rand_size**2
        _A = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__( self : Any ):
        '''simple docstring'''
        _A = np.random.permutation(self.token_count )[: self.mask_count]
        _A = np.zeros(self.token_count , dtype=__UpperCAmelCase )
        _A = 1

        _A = mask.reshape((self.rand_size, self.rand_size) )
        _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )

        return torch.tensor(mask.flatten() )


def __lowercase ( __lowercase ) -> str:
    '''simple docstring'''
    _A = torch.stack([example["pixel_values"] for example in examples] )
    _A = torch.stack([example["mask"] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}


def __lowercase ( ) -> Dict:
    '''simple docstring'''
    _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        _A , _A , _A = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim" , __lowercase , __lowercase )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    _A = training_args.get_process_log_level()
    logger.setLevel(__lowercase )
    transformers.utils.logging.set_verbosity(__lowercase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(F'''Training/evaluation parameters {training_args}''' )

    # Detecting last checkpoint.
    _A = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        _A = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Initialize our dataset.
    _A = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )

    # If we don't have a validation split, split off a percentage of train as validation.
    _A = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0:
        _A = ds["train"].train_test_split(data_args.train_val_split )
        _A = split["train"]
        _A = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _A = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase )
    elif model_args.model_name_or_path:
        _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
    else:
        _A = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch." )
        if model_args.config_overrides is not None:
            logger.info(F'''Overriding config: {model_args.config_overrides}''' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'''New config: {config}''' )

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(__lowercase , "decoder_type" ):
        _A = "simmim"

    # adapt config
    _A = model_args.image_size if model_args.image_size is not None else config.image_size
    _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    _A = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        } )

    # create image processor
    if model_args.image_processor_name:
        _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase )
    elif model_args.model_name_or_path:
        _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase )
    else:
        _A = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        _A = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch" )
        _A = AutoModelForMaskedImageModeling.from_config(__lowercase )

    if training_args.do_train:
        _A = ds["train"].column_names
    else:
        _A = ds["validation"].column_names

    if data_args.image_column_name is not None:
        _A = data_args.image_column_name
    elif "image" in column_names:
        _A = "image"
    elif "img" in column_names:
        _A = "img"
    else:
        _A = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    _A = Compose(
        [
            Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )

    # create mask generator
    _A = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )

    def preprocess_images(__lowercase ):
        _A = [transforms(__lowercase ) for image in examples[image_column_name]]
        _A = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(__lowercase )

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            _A = (
                ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(__lowercase )

    # Initialize our trainer
    _A = Trainer(
        model=__lowercase , args=__lowercase , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , )

    # Training
    if training_args.do_train:
        _A = None
        if training_args.resume_from_checkpoint is not None:
            _A = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            _A = last_checkpoint
        _A = trainer.train(resume_from_checkpoint=__lowercase )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        _A = trainer.evaluate()
        trainer.log_metrics("eval" , __lowercase )
        trainer.save_metrics("eval" , __lowercase )

    # Write model card and (optionally) push to hub
    _A = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**__lowercase )
    else:
        trainer.create_model_card(**__lowercase )


if __name__ == "__main__":
    main()
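# Example invocation of the training script above (the output directory is illustrative):
#   python run_mim.py --dataset_name cifar10 --output_dir ./simmim-from-scratch --do_train --do_eval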
'''simple docstring'''

from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


lowerCamelCase_ = logging.get_logger(__name__)


def __lowercase ( __lowercase ) -> List[int]:
    '''simple docstring'''
    if isinstance(__lowercase , np.ndarray ):
        return list(tensor.shape )

    _A = tf.shape(__lowercase )

    if tensor.shape == tf.TensorShape(__lowercase ):
        return dynamic

    _A = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(__lowercase )]


def __lowercase ( __lowercase , __lowercase = None , __lowercase = None ) -> tf.Tensor:
    '''simple docstring'''
    return tf.nn.softmax(logits=logits + 1e-9 , axis=__lowercase , name=__lowercase )


def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=1e-5 , __lowercase=-1 ) -> List[Any]:
    '''simple docstring'''
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__lowercase , __lowercase ):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )

    # Get mean and variance on the axis to be normalized
    _A , _A = tf.nn.moments(__lowercase , axes=[axis] , keepdims=__lowercase )

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        _A = [1] * inputs.shape.rank
        _A = shape_list(__lowercase )[axis]
        _A = tf.reshape(__lowercase , __lowercase )
        _A = tf.reshape(__lowercase , __lowercase )

    # Compute layer normalization using the batch_normalization
    # function.
    _A = tf.nn.batch_normalization(
        __lowercase , __lowercase , __lowercase , offset=__lowercase , scale=__lowercase , variance_epsilon=__lowercase , )
    return outputs


def __lowercase ( __lowercase , __lowercase=0 , __lowercase=-1 ) -> Optional[Any]:
    '''simple docstring'''
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    _A = tf.shape(__lowercase )
    _A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    _A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(__lowercase , __lowercase )


def __lowercase ( __lowercase ) -> tf.Tensor:
    '''simple docstring'''
    if not isinstance(__lowercase , tf.Tensor ):
        _A = tf.convert_to_tensor(__lowercase )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        _A = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        _A = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    _A = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def __lowercase ( __lowercase , __lowercase , __lowercase = "input_ids" ) -> None:
    '''simple docstring'''
    tf.debugging.assert_less(
        __lowercase , tf.cast(__lowercase , dtype=tensor.dtype ) , message=(
            F'''The maximum value of {tensor_name} ({tf.math.reduce_max(__lowercase )}) must be smaller than the embedding '''
            F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ) , )


def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
    '''simple docstring'''
    _A = 6_4512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    _A = [x for x in data if len(__lowercase ) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            F'''bytes: {bad_attributes}''' )

    _A = np.asarray(__lowercase )

    _A = 1
    _A = np.array_split(__lowercase , __lowercase )

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        _A = np.array_split(__lowercase , __lowercase )

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(__lowercase ):
            _A = chunk_data
    else:
        _A = data


def __lowercase ( __lowercase , __lowercase ) -> Tuple:
    '''simple docstring'''
    if name in group.attrs:
        _A = [n.decode("utf8" ) if hasattr(__lowercase , "decode" ) else n for n in group.attrs[name]]
    else:
        _A = []
        _A = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8" ) if hasattr(__lowercase , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
            chunk_id += 1
    return data


def __lowercase ( __lowercase ) -> Optional[int]:
    '''simple docstring'''

    def _expand_single_ad_tensor(__lowercase ):
        if isinstance(__lowercase , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(__lowercase , axis=-1 )
        return t

    return tf.nest.map_structure(_expand_single_ad_tensor , __lowercase )
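# Hedged sketch of the static/dynamic shape rule implemented by the first helper
# above (its upstream name is `shape_list`); inside a tf.function, unknown dims
# come back as scalar tensors instead of None.
import tensorflow as tf


def shape_list(tensor):
    dynamic = tf.shape(tensor)
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


@tf.function(input_signature=[tf.TensorSpec([None, 64], tf.float32)])
def flatten_batch(x):
    batch, dim = shape_list(x)  # batch is a scalar tensor here, dim is the int 64
    return tf.reshape(x, (batch, dim))


print(flatten_batch(tf.zeros((2, 64))).shape)  # (2, 64)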
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    snake_case = '''canine'''

    def __init__( self : Dict , __UpperCAmelCase : List[str]=768 , __UpperCAmelCase : str=12 , __UpperCAmelCase : Union[str, Any]=12 , __UpperCAmelCase : int=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Tuple=0.1 , __UpperCAmelCase : str=0.1 , __UpperCAmelCase : List[Any]=16384 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : str=0.02 , __UpperCAmelCase : Dict=1E-12 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : int=0xE000 , __UpperCAmelCase : List[Any]=0xE001 , __UpperCAmelCase : Any=4 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : List[str]=8 , __UpperCAmelCase : int=16384 , __UpperCAmelCase : Union[str, Any]=128 , **__UpperCAmelCase : Dict , ):
        '''simple docstring'''
        super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )

        _A = max_position_embeddings
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = intermediate_size
        _A = hidden_act
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = initializer_range
        _A = type_vocab_size
        _A = layer_norm_eps

        # Character config:
        _A = downsampling_rate
        _A = upsampling_kernel_size
        _A = num_hash_functions
        _A = num_hash_buckets
        _A = local_transformer_stride
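# Hedged usage sketch: upstream, the class above is `transformers.CanineConfig`
# (shown here with the dataset's mangled name); the override below is illustrative.
from transformers import CanineConfig, CanineModel

config = CanineConfig(num_hidden_layers=6)  # shrink the character encoder stack
model = CanineModel(config)
print(model.config.hidden_size)  # 768 by default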
'''simple docstring'''

from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING


lowerCamelCase_ = logging.get_logger(__name__)


@add_end_docstrings(snake_case_ )
class _UpperCAmelCase ( snake_case_ ):
    """simple docstring"""

    def __init__( self : Optional[Any] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : List[str] ):
        '''simple docstring'''
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
        self.check_model_type(__UpperCAmelCase )

    def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : List[str]=None , **__UpperCAmelCase : Any ):
        '''simple docstring'''
        _A , _A = {}, {}
        if padding is not None:
            _A = padding
        if truncation is not None:
            _A = truncation
        if top_k is not None:
            _A = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self : List[str] , __UpperCAmelCase : Union["Image.Image", str] , __UpperCAmelCase : str = None , **__UpperCAmelCase : str ):
        '''simple docstring'''
        if isinstance(__UpperCAmelCase , (Image.Image, str) ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            _A = {"image": image, "question": question}
        else:
            _A = image
        _A = super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
        return results

    def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict=False , __UpperCAmelCase : Optional[int]=False ):
        '''simple docstring'''
        _A = load_image(inputs["image"] )
        _A = self.tokenizer(
            inputs["question"] , return_tensors=self.framework , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
        _A = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework )
        model_inputs.update(__UpperCAmelCase )
        return model_inputs

    def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[int] ):
        '''simple docstring'''
        _A = self.model(**__UpperCAmelCase )
        return model_outputs

    def lowerCAmelCase ( self : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int]=5 ):
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            _A = self.model.config.num_labels

        if self.framework == "pt":
            _A = model_outputs.logits.sigmoid()[0]
            _A , _A = probs.topk(__UpperCAmelCase )
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )

        _A = scores.tolist()
        _A = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__UpperCAmelCase , __UpperCAmelCase )]
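# Hedged usage sketch via the standard `transformers` factory; the checkpoint
# and image URL are illustrative.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
answers = vqa(
    image="https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png",
    question="What is she wearing?",
    top_k=2,
)
print(answers)  # e.g. [{'score': ..., 'answer': 'hat'}, ...]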
'''simple docstring'''


class _UpperCAmelCase :
    """simple docstring"""

    def __init__( self : List[str] , __UpperCAmelCase : list[int] ):
        '''simple docstring'''
        _A = len(__UpperCAmelCase )
        _A = [0] * len_array

        if len_array > 0:
            _A = array[0]

        for i in range(1 , __UpperCAmelCase ):
            _A = self.prefix_sum[i - 1] + array[i]

    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ):
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : int ):
        '''simple docstring'''
        _A = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(__UpperCAmelCase )

        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
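# Hedged sketch of the same data structure with readable names (upstream the
# methods are `get_sum` and `contains_sum`), plus a small usage example.
class PrefixSum:
    def __init__(self, array):
        self.prefix_sum = []
        running = 0
        for value in array:
            running += value
            self.prefix_sum.append(running)

    def get_sum(self, start, end):
        # inclusive range sum in O(1) after O(n) preprocessing
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        # any contiguous subarray sums to target_sum iff two prefix sums differ by it
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


ps = PrefixSum([1, 2, 3, 4])
print(ps.get_sum(1, 3))    # 2 + 3 + 4 = 9
print(ps.contains_sum(5))  # True (2 + 3)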
'''simple docstring'''

from PIL import Image


def __lowercase ( __lowercase , __lowercase ) -> Image:
    '''simple docstring'''
    _A = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(__lowercase ) -> int:
        return int(128 + factor * (c - 128) )

    return img.point(__lowercase )


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change contrast to 170
        lowerCamelCase_ = change_contrast(img, 1_70)
        cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
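# Quick numeric check of the contrast factor formula above for level = 170:
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))
print(round(factor, 3))  # 4.85, a strong contrast boost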
'''simple docstring'''

from typing import List

import numpy as np


def __lowercase ( __lowercase ) -> int:
    '''simple docstring'''
    _A = {key: len(__lowercase ) for key, value in gen_kwargs.items() if isinstance(__lowercase , __lowercase )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    _A = max(lists_lengths.values() , default=0 )
    return max(1 , __lowercase )


def __lowercase ( __lowercase , __lowercase ) -> List[range]:
    '''simple docstring'''
    _A = []
    for group_idx in range(__lowercase ):
        _A = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        _A = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        _A = range(__lowercase , start + num_shards_to_add )
        shards_indices_per_group.append(__lowercase )
    return shards_indices_per_group


def __lowercase ( __lowercase , __lowercase ) -> List[dict]:
    '''simple docstring'''
    _A = _number_of_shards_in_gen_kwargs(__lowercase )
    if num_shards == 1:
        return [dict(__lowercase )]
    else:
        _A = _distribute_shards(num_shards=__lowercase , max_num_jobs=__lowercase )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(__lowercase , __lowercase )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(__lowercase ) )
        ]


def __lowercase ( __lowercase ) -> dict:
    '''simple docstring'''
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , __lowercase )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def __lowercase ( __lowercase , __lowercase ) -> dict:
    '''simple docstring'''
    _A = {len(__lowercase ) for value in gen_kwargs.values() if isinstance(__lowercase , __lowercase )}
    _A = {}
    for size in list_sizes:
        _A = list(range(__lowercase ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    _A = dict(__lowercase )
    for key, value in shuffled_kwargs.items():
        if isinstance(__lowercase , __lowercase ):
            _A = [value[i] for i in indices_per_size[len(__lowercase )]]
    return shuffled_kwargs
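# Hedged sketch of the shard-distribution rule implemented by the second helper
# above (readable names substituted for the mangled ones): earlier groups absorb
# the remainder, one extra shard each.
def distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    groups = []
    start = 0
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if num_shards_to_add == 0:
            break
        groups.append(range(start, start + num_shards_to_add))
        start += num_shards_to_add
    return groups


print(distribute_shards(10, 3))  # [range(0, 4), range(4, 7), range(7, 10)]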
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig lowerCamelCase_ = logging.get_logger(__name__) # General docstring lowerCamelCase_ = '''MobileNetV1Config''' # Base docstring lowerCamelCase_ = '''google/mobilenet_v1_1.0_224''' lowerCamelCase_ = [1, 10_24, 7, 7] # Image classification docstring lowerCamelCase_ = '''google/mobilenet_v1_1.0_224''' lowerCamelCase_ = '''tabby, tabby cat''' lowerCamelCase_ = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def __lowercase ( __lowercase , __lowercase , __lowercase=None ) -> List[str]: '''simple docstring''' _A = {} if isinstance(__lowercase , __lowercase ): _A = model.mobilenet_va else: _A = model _A = "MobilenetV1/Conv2d_0/" _A = backbone.conv_stem.convolution.weight _A = backbone.conv_stem.normalization.bias _A = backbone.conv_stem.normalization.weight _A = backbone.conv_stem.normalization.running_mean _A = backbone.conv_stem.normalization.running_var for i in range(13 ): _A = i + 1 _A = i * 2 _A = backbone.layer[pt_index] _A = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' _A = pointer.convolution.weight _A = pointer.normalization.bias _A = pointer.normalization.weight _A = pointer.normalization.running_mean _A = pointer.normalization.running_var _A = backbone.layer[pt_index + 1] _A = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' _A = pointer.convolution.weight _A = pointer.normalization.bias _A = pointer.normalization.weight _A = pointer.normalization.running_mean _A = pointer.normalization.running_var if isinstance(__lowercase , __lowercase ): _A = "MobilenetV1/Logits/Conv2d_1c_1x1/" _A = model.classifier.weight _A = model.classifier.bias return tf_to_pt_map def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Dict: '''simple docstring''' try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model _A = tf.train.list_variables(__lowercase ) _A = {} for name, shape in init_vars: logger.info(F'''Loading TF weight {name} with shape {shape}''' ) _A = tf.train.load_variable(__lowercase , __lowercase ) _A = array # Build TF to PyTorch weights loading map _A = _build_tf_to_pytorch_map(__lowercase , __lowercase , __lowercase ) for name, pointer in tf_to_pt_map.items(): logger.info(F'''Importing {name}''' ) if name not in tf_weights: logger.info(F'''{name} not in tf pre-trained weights, skipping''' ) continue _A = tf_weights[name] if "depthwise_weights" in name: logger.info("Transposing depthwise" ) _A = np.transpose(__lowercase , (2, 3, 0, 1) ) elif "weights" in name: logger.info("Transposing" ) if len(pointer.shape ) == 2: # copying into linear layer _A = array.squeeze().transpose() else: _A = np.transpose(__lowercase , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''' ) logger.info(F'''Initialize PyTorch weight {name} {array.shape}''' ) _A = torch.from_numpy(__lowercase ) tf_weights.pop(__lowercase , __lowercase ) tf_weights.pop(name + "/RMSProp" , __lowercase ) tf_weights.pop(name + "/RMSProp_1" , __lowercase ) tf_weights.pop(name + "/ExponentialMovingAverage" , __lowercase ) logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}''' ) return model def __lowercase ( __lowercase , __lowercase ) -> torch.Tensor: '''simple docstring''' _A , _A = features.shape[-2:] _A , _A = conv_layer.stride _A , _A = conv_layer.kernel_size if in_height % stride_height == 0: _A = max(kernel_height - stride_height , 0 ) else: _A = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: _A = max(kernel_width - stride_width , 0 ) else: _A = max(kernel_width - (in_width % stride_width) , 0 ) _A = pad_along_width // 2 _A = pad_along_width - pad_left _A = pad_along_height // 2 _A = pad_along_height - pad_top _A = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__lowercase , __lowercase , "constant" , 0.0 ) class _UpperCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : int , __UpperCAmelCase : MobileNetVaConfig , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[bool] = True , __UpperCAmelCase : Optional[bool or str] = True , ): '''simple docstring''' super().__init__() _A = config if in_channels % groups != 0: raise ValueError(f'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(f'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) _A = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) _A = nn.Convad( in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=__UpperCAmelCase , stride=__UpperCAmelCase , padding=__UpperCAmelCase , groups=__UpperCAmelCase , bias=__UpperCAmelCase , padding_mode="zeros" , ) if use_normalization: _A = nn.BatchNormad( num_features=__UpperCAmelCase , eps=config.layer_norm_eps , momentum=0.9997 , affine=__UpperCAmelCase , track_running_stats=__UpperCAmelCase , ) else: _A = None if use_activation: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = ACTaFN[use_activation] elif isinstance(config.hidden_act , __UpperCAmelCase ): _A = ACTaFN[config.hidden_act] else: _A = 
config.hidden_act else: _A = None def lowerCAmelCase ( self : str , __UpperCAmelCase : torch.Tensor ): '''simple docstring''' if self.config.tf_padding: _A = apply_tf_padding(__UpperCAmelCase , self.convolution ) _A = self.convolution(__UpperCAmelCase ) if self.normalization is not None: _A = self.normalization(__UpperCAmelCase ) if self.activation is not None: _A = self.activation(__UpperCAmelCase ) return features class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = MobileNetVaConfig snake_case = load_tf_weights_in_mobilenet_va snake_case = '''mobilenet_v1''' snake_case = '''pixel_values''' snake_case = False def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[nn.Linear, nn.Convad] ): '''simple docstring''' if isinstance(__UpperCAmelCase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__UpperCAmelCase , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) lowerCamelCase_ = r''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' lowerCamelCase_ = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , snake_case_ , ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : List[Any] , __UpperCAmelCase : MobileNetVaConfig , __UpperCAmelCase : bool = True ): '''simple docstring''' super().__init__(__UpperCAmelCase ) _A = config _A = 32 _A = max(int(depth * config.depth_multiplier ) , config.min_depth ) _A = MobileNetVaConvLayer( __UpperCAmelCase , in_channels=config.num_channels , out_channels=__UpperCAmelCase , kernel_size=3 , stride=2 , ) _A = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] _A = nn.ModuleList() for i in range(13 ): _A = out_channels if strides[i] == 2 or i == 0: depth *= 2 _A = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=3 , stride=strides[i] , groups=__UpperCAmelCase , ) ) self.layer.append( MobileNetVaConvLayer( __UpperCAmelCase , in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=1 , ) ) _A = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : List[str] ): '''simple docstring''' raise NotImplementedError @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[bool] = None , ): '''simple docstring''' _A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _A = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values" ) _A = self.conv_stem(__UpperCAmelCase ) _A = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): _A = layer_module(__UpperCAmelCase ) if output_hidden_states: _A = all_hidden_states + (hidden_states,) _A = hidden_states if self.pooler is not None: _A = torch.flatten(self.pooler(__UpperCAmelCase ) , start_dim=1 ) else: _A = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__UpperCAmelCase , pooler_output=__UpperCAmelCase , hidden_states=__UpperCAmelCase , ) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , snake_case_ , ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : MobileNetVaConfig ): '''simple docstring''' super().__init__(__UpperCAmelCase ) _A = config.num_labels _A = MobileNetVaModel(__UpperCAmelCase ) _A = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head _A = nn.Dropout(config.classifier_dropout_prob , inplace=__UpperCAmelCase ) _A = nn.Linear(__UpperCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[torch.Tensor] = None , __UpperCAmelCase : Optional[bool] = None , ): '''simple docstring''' _A = return_dict if return_dict is not None else self.config.use_return_dict _A = self.mobilenet_va(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , return_dict=__UpperCAmelCase ) _A = outputs.pooler_output if return_dict else outputs[1] _A = self.classifier(self.dropout(__UpperCAmelCase ) ) _A = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _A = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _A = "single_label_classification" else: _A = "multi_label_classification" if self.config.problem_type == "regression": _A = MSELoss() if self.num_labels == 1: _A = loss_fct(logits.squeeze() , labels.squeeze() ) else: _A = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": _A = CrossEntropyLoss() _A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _A = BCEWithLogitsLoss() _A = loss_fct(__UpperCAmelCase , __UpperCAmelCase ) if not return_dict: _A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states , )
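# A minimal inference sketch for the image-classification model defined
# above. It goes through the public transformers API rather than the
# obfuscated local class names; the "google/mobilenet_v1_1.0_224"
# checkpoint is an assumed example, any MobileNetV1 checkpoint works.
if __name__ == "__main__":
    import requests
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # the classifier head above maps pooled features to config.num_labels logits
    print(model.config.id2label[logits.argmax(-1).item()])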
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
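# The file above uses transformers' _LazyModule so that importing the
# package stays cheap until a symbol is actually touched. A minimal sketch
# of that idea (an illustration, not the real _LazyModule implementation):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name: str):
        # first access of a symbol triggers the real (expensive) import
        module = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(module, name)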
'''simple docstring''' from __future__ import annotations from math import pow, sqrt def __lowercase ( __lowercase , __lowercase , __lowercase ) -> dict[str, float]: '''simple docstring''' if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if resistance == 0: return {"resistance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(__lowercase , 2 ) + pow(__lowercase , 2 ) )} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
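# The definition above is no longer valid Python once obfuscation collapsed
# all three parameter names into __lowercase; a readable sketch of the
# intended function, which solves Z**2 = R**2 + X**2 for whichever argument
# is 0 (the name _impedance_triangle is illustrative, not from the original):
def _impedance_triangle(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    from math import sqrt

    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(impedance**2 - reactance**2)}
    if reactance == 0:
        return {"reactance": sqrt(impedance**2 - resistance**2)}
    return {"impedance": sqrt(resistance**2 + reactance**2)}


# _impedance_triangle(3, 4, 0) -> {'impedance': 5.0}
# _impedance_triangle(0, 4, 5) -> {'resistance': 3.0}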
'''simple docstring''' from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ): '''simple docstring''' super().__init__() _A = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase ) else: _A = None _A = torch.nn.Parameter(__UpperCAmelCase ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ): '''simple docstring''' _A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1 # get prompt text embeddings _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) _A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) _A = text_input_ids[:, : self.tokenizer.model_max_length] _A = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate text embeddings for each generation per prompt _A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _A = self.learned_classifier_free_sampling_embeddings.embeddings _A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 ) else: _A = [""] * batch_size _A = text_input_ids.shape[-1] _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , ) _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _A = negative_prompt_embeds.shape[1] _A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 ) _A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = 1 elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = len(__UpperCAmelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' ) _A = batch_size * num_images_per_prompt _A = guidance_scale > 1.0 _A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(__UpperCAmelCase )}.''' ) # get the initial completely masked latents unless the user supplied it _A = (batch_size, self.transformer.num_latent_pixels) if latents is None: _A = self.transformer.num_vector_embeds - 1 _A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0," f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) _A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device ) _A = self.scheduler.timesteps.to(self.device ) _A = latents for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ): # expand the sample if we are doing classifier free guidance _A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample if do_classifier_free_guidance: _A , _A = model_output.chunk(2 ) _A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase ) _A = self.truncate(__UpperCAmelCase , __UpperCAmelCase ) # remove `log(0)`'s (`-inf`s) _A = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = self.vqvae.config.vq_embed_dim _A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase ) _A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample _A = (image / 2 + 0.5).clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ): '''simple docstring''' _A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase ) _A = torch.exp(__UpperCAmelCase ) _A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase ) _A = torch.cat((all_true, keep_mask) , dim=1 ) _A = keep_mask[:, :-1, :] _A = keep_mask.gather(1 , indices.argsort(1 ) ) _A = log_p_x_0.clone() _A = -torch.inf # -inf = log(0) return rv
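# A minimal end-to-end sketch for the pipeline above, assuming the public
# diffusers VQDiffusionPipeline export and the "microsoft/vq-diffusion-ithq"
# checkpoint (both assumptions here, neither is referenced in this file):
if __name__ == "__main__":
    import torch
    from diffusers import VQDiffusionPipeline

    pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    # __call__ runs the masked-token reverse process and decodes via the VQ-VAE
    image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
    image.save("teddy_bear.png")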
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> int: '''simple docstring''' _A = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _A = "" else: _A = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[ : config.hidden_size, : ] _A = in_proj_bias[: config.hidden_size] _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = in_proj_bias[-config.hidden_size :] def __lowercase ( __lowercase ) -> List[str]: '''simple docstring''' _A = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple: '''simple docstring''' _A = dct.pop(__lowercase ) _A = val def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' _A = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=__lowercase , ) _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 ) _A = False # load original model from timm _A = timm.create_model(__lowercase , pretrained=__lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _A = timm_model.state_dict() if base_model: remove_classification_head_(__lowercase ) _A = create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase , __lowercase ) _A = "huggingface/label-files" _A = "imagenet-1k-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _A = ViTHybridModel(__lowercase ).eval() else: _A = ViTHybridForImageClassification(__lowercase ).eval() model.load_state_dict(__lowercase ) # create image processor _A = create_transform(**resolve_data_config({} , model=__lowercase ) ) _A = transform.transforms _A = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _A = ViTHybridImageProcessor( do_resize=__lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _A = prepare_img() _A = transform(__lowercase ).unsqueeze(0 ) _A = processor(__lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__lowercase , __lowercase ) # verify logits with torch.no_grad(): _A = model(__lowercase ) _A = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _A = timm_model.forward_features(__lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowercase , outputs.pooler_output , 
atol=1e-3 ) else: _A = timm_model(__lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__lowercase ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
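# Example invocation of the conversion script above (the filename is an
# assumption; --push_to_hub may be added to upload the verified weights):
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384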
'''simple docstring''' class _UpperCAmelCase : """simple docstring""" def __init__( self : Dict , __UpperCAmelCase : list ): '''simple docstring''' _A = set_counts _A = max(__UpperCAmelCase ) _A = len(__UpperCAmelCase ) _A = [1] * num_sets _A = list(range(__UpperCAmelCase ) ) def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' _A = self.get_parent(__UpperCAmelCase ) _A = self.get_parent(__UpperCAmelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] _A = 0 _A = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 _A = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] _A = 0 _A = src_parent _A = self.set_counts[src_parent] _A = max(self.max_set , __UpperCAmelCase ) return True def lowerCAmelCase ( self : Dict , __UpperCAmelCase : int ): '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set _A = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
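# Obfuscation gave both non-__init__ methods above the same name
# (lowerCAmelCase) and left the self.get_parent reference dangling, so the
# class cannot run as written. A readable sketch of the same union-by-rank
# structure with path compression and a running maximum merged-set size:
class _DisjointSetSketch:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def get_parent(self, node: int) -> int:
        if self.parents[node] == node:
            return node
        self.parents[node] = self.get_parent(self.parents[node])  # path compression
        return self.parents[node]

    def merge(self, src: int, dst: int) -> bool:
        src_parent, dst_parent = self.get_parent(src), self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # keep the higher-ranked root in src_parent so it absorbs the other
            src_parent, dst_parent = dst_parent, src_parent
        self.set_counts[src_parent] += self.set_counts[dst_parent]
        self.set_counts[dst_parent] = 0
        self.parents[dst_parent] = src_parent
        if self.ranks[src_parent] == self.ranks[dst_parent]:
            self.ranks[src_parent] += 1
        self.max_set = max(self.max_set, self.set_counts[src_parent])
        return True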
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class _UpperCAmelCase : """simple docstring""" snake_case = '''dummy_data''' snake_case = '''datasets''' snake_case = False def __init__( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Union[Version, str] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[List[Callable]] = None , ): '''simple docstring''' _A = 0 _A = dataset_name _A = cache_dir _A = use_local_dummy_data _A = config # download_callbacks take a single url as input _A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _A = str(__UpperCAmelCase ) # to be downloaded _A = None _A = None @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self._dummy_file is None: _A = self.download_dummy_data() return self._dummy_file @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _A = cached_path( __UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase ) return os.path.join(__UpperCAmelCase , self.dummy_file_name ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' if self._bucket_url is None: _A = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def lowerCAmelCase ( self : str ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] , *__UpperCAmelCase : Dict ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _A = self.dummy_file_name # special case when data_url is a dict if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , (list, tuple) ): return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase ) else: return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : Any ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : List[str] ): '''simple docstring''' return path def lowerCAmelCase ( self : str ): '''simple docstring''' return {} def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): for single_url in single_urls: download_callback(__UpperCAmelCase ) else: _A = single_urls download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls] else: _A = single_urls _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) _A = value # make sure that values are unique if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __UpperCAmelCase ) ) for url in data_url ) _A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _A = [data_url[0]] * len(__UpperCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__UpperCAmelCase ) return dummy_data_list def 
lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : Any , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' def _iter_archive_members(__UpperCAmelCase : List[Any] ): # this preserves the order of the members inside the ZIP archive _A = Path(self.dummy_file ).parent _A = path.relative_to(__UpperCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__UpperCAmelCase ) _A = Path(__UpperCAmelCase ) _A = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("rb" ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [paths] for path in paths: if os.path.isfile(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__UpperCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
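# A small illustration of the URL-to-dummy-path mapping the manager above
# relies on: every remote URL is replaced by a file inside dummy_data.zip
# named after the quote_plus-encoded last component of the URL (the example
# URL and folder below are illustrative):
if __name__ == "__main__":
    import os
    import urllib.parse
    from pathlib import Path

    url = "https://example.com/data/train.jsonl?version=2"
    dummy_root = os.path.join("dummy", "1.0.0", "dummy_data")
    print(os.path.join(dummy_root, urllib.parse.quote_plus(Path(url).name)))
    # dummy/1.0.0/dummy_data/train.jsonl%3Fversion%3D2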
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets lowerCamelCase_ = datasets.logging.get_logger(__name__) lowerCamelCase_ = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' lowerCamelCase_ = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' lowerCamelCase_ = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "sources": datasets.Value("string" , id="sequence" ), "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[ "https://github.com/Unbabel/COMET", "https://www.aclweb.org/anthology/2020.emnlp-main.213/", "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6", ] , ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ): '''simple docstring''' if self.config_name == "default": _A = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) ) else: _A = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=False ): '''simple docstring''' if gpus is None: _A = 1 if torch.cuda.is_available() else 0 _A = {"src": sources, "mt": predictions, "ref": references} _A = [dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) for t in zip(*data.values() )] _A , _A = self.scorer.predict(__UpperCAmelCase , gpus=__UpperCAmelCase , progress_bar=__UpperCAmelCase ) return {"mean_score": mean_score, "scores": scores}
'''simple docstring''' from bisect import bisect from itertools import accumulate def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A = sorted(zip(__lowercase , __lowercase ) , key=lambda __lowercase : x[0] / x[1] , reverse=__lowercase ) _A , _A = [i[0] for i in r], [i[1] for i in r] _A = list(accumulate(__lowercase ) ) _A = bisect(__lowercase , __lowercase ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
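# The definition above is not valid Python after obfuscation (all four
# parameters share one name); a readable sketch of the intended greedy
# fractional-knapsack routine (the function name is illustrative):
def _fractional_knapsack(value: list, weight: list, capacity: float, item_count: int) -> float:
    from bisect import bisect
    from itertools import accumulate

    ordered = sorted(zip(value, weight), key=lambda pair: pair[0] / pair[1], reverse=True)
    values, weights = [p[0] for p in ordered], [p[1] for p in ordered]
    acc = list(accumulate(weights))
    k = bisect(acc, capacity)  # number of items that fit whole
    if k == 0:
        return 0
    if k == item_count:
        return sum(values[:k])
    # take the k best-ratio items whole, then a fraction of the next one
    return sum(values[:k]) + (capacity - acc[k - 1]) * values[k] / weights[k]


# _fractional_knapsack([60, 100, 120], [10, 20, 30], 50, 3) -> 240.0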
'''simple docstring''' from __future__ import annotations def __lowercase ( __lowercase , __lowercase = None , __lowercase = None ) -> None: '''simple docstring''' if start is None: _A = 0 if end is None: _A = len(__lowercase ) - 1 if start >= end: return _A = (start + end) // 2 slowsort(__lowercase , __lowercase , __lowercase ) slowsort(__lowercase , mid + 1 , __lowercase ) if sequence[end] < sequence[mid]: _A , _A = sequence[mid], sequence[end] slowsort(__lowercase , __lowercase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
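# Usage sketch for the routine above. Its recursive calls still refer to
# `slowsort`, so the obfuscated top-level name __lowercase must really be
# `slowsort` for the file to run; assuming that:
#
#   sequence = [5, 2, 4, 1, 3]
#   slowsort(sequence)               # in-place "multiply and surrender" sort
#   assert sequence == [1, 2, 3, 4, 5]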
'''simple docstring''' from __future__ import annotations def __lowercase ( __lowercase ) -> int: '''simple docstring''' for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(__lowercase ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(__lowercase ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
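# Worked example for the dynamic program above: it accumulates, in place,
# the cheapest cost of walking from the top-left to the bottom-right cell
# using only right and down moves.
#
#   [[1, 3, 1],            [[1, 4, 5],
#    [1, 5, 1],    --->     [2, 7, 6],
#    [4, 2, 1]]             [6, 8, 7]]     returns 7  (path 1 -> 3 -> 1 -> 1 -> 1)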
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. 
I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
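# A standalone inference sketch mirroring the integration test above, via
# the public TF API and the "google/pegasus-xsum" checkpoint it names:
if __name__ == "__main__":
    from transformers import AutoTokenizer, TFPegasusForConditionalGeneration

    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(
        ["PG&E scheduled the blackouts in response to forecasts for high winds."],
        padding=True,
        return_tensors="tf",
    )
    summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))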
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = CycleDiffusionPipeline snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } snake_case = PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _A = CLIPTextModel(__UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _A = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]=0 ): '''simple docstring''' _A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _A = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith("mps" ): _A = torch.manual_seed(__UpperCAmelCase ) else: _A = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _A = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = "cpu" # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = 
self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.get_dummy_components() for name, module in components.items(): if hasattr(__UpperCAmelCase , "half" ): _A = module.half() _A = CycleDiffusionPipeline(**__UpperCAmelCase ) _A = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _A = self.get_dummy_inputs(__UpperCAmelCase ) _A = pipe(**__UpperCAmelCase ) _A = output.images _A = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) _A = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCAmelCase ( self : Any ): '''simple docstring''' return super().test_save_load_local() @unittest.skip("non-deterministic pipeline" ) def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_inference_batch_single_identical() @skip_mps def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' return super().test_dict_tuple_outputs_equivalent() @skip_mps def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' return super().test_save_load_optional_components() @skip_mps def lowerCAmelCase ( self : str ): '''simple docstring''' return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) _A = init_image.resize((512, 512) ) _A = "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained( __UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , torch_dtype=torch.floataa , revision="fp16" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) _A = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" ) _A = init_image.resize((512, 512) ) _A 
= "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images assert np.abs(image - expected_image ).max() < 2E-2
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ): '''simple docstring''' _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = "gelu" _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = TFRoFormerModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = True _A = TFRoFormerForCausalLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = self.num_choices _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple 
docstring''' _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) 
self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(__UpperCAmelCase )[0] # TODO Replace vocab size _A = 50000 _A = [1, 6, vocab_size] self.assertEqual(output.shape , __UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. _A = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = tf.constant([[4, 10]] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _A = emba(input_ids.shape ) _A = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) _A = emba.weight[:3, :5] tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : str ): '''simple docstring''' _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _A = embed_positions([2, 16, 768] )[None, None, :, :] _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _A = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
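The last test above compares apply_rotary_position_embeddings against precomputed query/key tensors. The identity being tested is an ordinary 2-D rotation applied to each feature pair at each position; a small NumPy sketch of that math, assuming the usual RoPE angle theta = m / 10000**(2i/d) rather than calling into the library.

import numpy as np

def rotate_pair(x1, x2, theta):
    # Rotate the feature pair (x1, x2) by angle theta, as rotary embeddings do per position.
    return x1 * np.cos(theta) - x2 * np.sin(theta), x1 * np.sin(theta) + x2 * np.cos(theta)

m, i, d = 4, 0, 64  # position, feature-pair index, head dimension
theta = m / 10000 ** (2 * i / d)
print(rotate_pair(1.0, 0.0, theta))  # (cos(4), sin(4)) for the first pair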
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowerCamelCase_ = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowerCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowerCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def __lowercase ( __lowercase , __lowercase ) -> tuple[str, float]: '''simple docstring''' _A = len([g for position, g in enumerate(__lowercase ) if g == main_target[position]] ) return (item, float(__lowercase )) def __lowercase ( __lowercase , __lowercase ) -> tuple[str, str]: '''simple docstring''' _A = random.randint(0 , len(__lowercase ) - 1 ) _A = parent_a[:random_slice] + parent_a[random_slice:] _A = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __lowercase ( __lowercase , __lowercase ) -> str: '''simple docstring''' _A = list(__lowercase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: _A = random.choice(__lowercase ) return "".join(__lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase , ) -> list[str]: '''simple docstring''' _A = [] # Generate more children proportionally to the fitness score. _A = int(parent_a[1] * 100 ) + 1 _A = 10 if child_n >= 10 else child_n for _ in range(__lowercase ): _A = population_score[random.randint(0 , __lowercase )][0] _A , _A = crossover(parent_a[0] , __lowercase ) # Append new string to the population list. pop.append(mutate(__lowercase , __lowercase ) ) pop.append(mutate(__lowercase , __lowercase ) ) return pop def __lowercase ( __lowercase , __lowercase , __lowercase = True ) -> tuple[int, int, str]: '''simple docstring''' if N_POPULATION < N_SELECTED: _A = F'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__lowercase ) # Verify that the target contains no genes besides the ones inside genes variable. _A = sorted({c for c in target if c not in genes} ) if not_in_genes_list: _A = F'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__lowercase ) # Generate random starting population. _A = [] for _ in range(__lowercase ): population.append("".join([random.choice(__lowercase ) for i in range(len(__lowercase ) )] ) ) # Just some logs to know what the algorithms is doing. _A , _A = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__lowercase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. _A = [evaluate(__lowercase , __lowercase ) for item in population] # Check if there is a matching evolution. 
_A = sorted(__lowercase , key=lambda __lowercase : __lowercase[1] , reverse=__lowercase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generations. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'''\nGeneration: {generation}''' F'''\nTotal Population: {total_population}''' F'''\nBest score: {population_score[0][1]}''' F'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoids regression of evolution. _A = population[: int(N_POPULATION / 3 )] population.clear() population.extend(__lowercase ) # Normalize population score to be between 0 and 1. _A = [ (item, score / len(__lowercase )) for item, score in population_score ] # This is selection for i in range(__lowercase ): population.extend(select(population_score[int(__lowercase )] , __lowercase , __lowercase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # far fewer generations. if len(__lowercase ) > N_POPULATION: break if __name__ == "__main__": lowerCamelCase_ = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) lowerCamelCase_ = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = basic(target_str, genes_list) print( F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
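A quick smoke test of the loop above, assuming the evolve function is exposed as basic(target, genes, debug), as the __main__ block and the return annotation suggest:

generation, total_population, best_string = basic(
    "HELLO WORLD", list(" ABCDEFGHIJKLMNOPQRSTUVWXYZ"), False
)
assert best_string == "HELLO WORLD"
print(f"converged in {generation} generations over {total_population} candidates")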
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = '''gpt_neox''' def __init__( self : List[Any] , __UpperCAmelCase : List[Any]=50432 , __UpperCAmelCase : Any=6144 , __UpperCAmelCase : List[str]=44 , __UpperCAmelCase : List[Any]=64 , __UpperCAmelCase : List[str]=24576 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : Tuple=0.25 , __UpperCAmelCase : Optional[Any]=10000 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : str=0.0 , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Tuple=2048 , __UpperCAmelCase : Optional[int]=0.02 , __UpperCAmelCase : Union[str, Any]=1E-5 , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : str=True , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : Tuple , ): '''simple docstring''' super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _A = vocab_size _A = max_position_embeddings _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = rotary_pct _A = rotary_emb_base _A = attention_dropout _A = hidden_dropout _A = classifier_dropout _A = initializer_range _A = layer_norm_eps _A = use_cache _A = tie_word_embeddings _A = use_parallel_residual _A = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( "The hidden size is not divisible by the number of attention heads! Make sure to update them!" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f'''got {self.rope_scaling}''' ) _A = self.rope_scaling.get("type" , __UpperCAmelCase ) _A = self.rope_scaling.get("factor" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
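A minimal sketch of the rope_scaling contract enforced by the validator above, assuming the public transformers GPTNeoXConfig API (available from transformers v4.31 on):

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation

try:
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # factor must be > 1
except ValueError as err:
    print(err)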
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
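For example, the first four terms come back as strings, with the leading term rendered as a bare "1":

assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]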
'''simple docstring'''
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    '''simple docstring'''
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
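The factor is the standard 8-bit contrast correction: mid-gray (128) is a fixed point and every other value is scaled away from it. A quick worked check at the level used in the demo:

level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))
print(round(factor, 2))  # ~4.85
print(int(128 + factor * (200 - 128)))  # a pixel of 200 maps to ~477; the 8-bit image mode clips it to 255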
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , ) -> List[str]: '''simple docstring''' _A = {} if train_file is not None: _A = [train_file] if eval_file is not None: _A = [eval_file] if test_file is not None: _A = [test_file] _A = datasets.load_dataset("csv" , data_files=__lowercase ) _A = list(ds[list(files.keys() )[0]].features.keys() ) _A = features_name.pop(__lowercase ) _A = list(set(ds[list(files.keys() )[0]][label_name] ) ) _A = {label: i for i, label in enumerate(__lowercase )} _A = tokenizer.model_input_names _A = {} if len(__lowercase ) == 1: for k in files.keys(): _A = ds[k].map( lambda __lowercase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=__lowercase , max_length=__lowercase , padding="max_length" ) , batched=__lowercase , ) elif len(__lowercase ) == 2: for k in files.keys(): _A = ds[k].map( lambda __lowercase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=__lowercase , max_length=__lowercase , padding="max_length" , ) , batched=__lowercase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _A = {k: v for k, v in ex.items() if k in input_names} _A = labelaid[ex[label_name]] yield (d, label) _A = ( tf.data.Dataset.from_generator( __lowercase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _A = ( tf.data.Dataset.from_generator( __lowercase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _A = ( tf.data.Dataset.from_generator( __lowercase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCamelCase_ = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field(metadata={'''help''': '''Which column contains the label'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''The path of the training file'''} ) snake_case = 
field(default=snake_case_ , metadata={'''help''': '''The path of the development file'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''The path of the test file'''} ) snake_case = field( default=1_28 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def __lowercase ( ) -> Tuple: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _A , _A , _A = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ''' F'''16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
_A = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _A , _A , _A , _A = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__lowercase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _A = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _A = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , ) def compute_metrics(__lowercase ) -> Dict: _A = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _A = TFTrainer( model=__lowercase , args=__lowercase , train_dataset=__lowercase , eval_dataset=__lowercase , compute_metrics=__lowercase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _A = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) _A = trainer.evaluate() _A = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(__lowercase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(F''' {key} = {value}''' ) writer.write(F'''{key} = {value}\n''' ) results.update(__lowercase ) return results if __name__ == "__main__": main()
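A hypothetical invocation of this script, shown as a comment; the flag names mirror the dataclass fields that HfArgumentParser parses above, while the script filename and file paths are illustrative.

#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --label_column_id 0 \
#       --train_file train.csv \
#       --dev_file dev.csv \
#       --output_dir ./text_clf_output \
#       --do_train \
#       --do_eval \
#       --max_seq_length 128 \
#       --overwrite_output_dir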
'''simple docstring'''
def sylvester(number: int) -> int:
    '''simple docstring'''
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
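Since lower * upper + 1 expands to s(n-1)**2 - s(n-1) + 1, the function reproduces Sylvester's sequence 2, 3, 7, 43, 1807, ...; a quick check:

assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]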
'''simple docstring''' from collections import deque class _UpperCAmelCase : """simple docstring""" def __init__( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple docstring''' _A = process_name # process name _A = arrival_time # arrival time of the process # completion time of finished process or last interrupted time _A = arrival_time _A = burst_time # remaining burst time _A = 0 # total time of the process wait in ready queue _A = 0 # time from arrival time to completion time class _UpperCAmelCase : """simple docstring""" def __init__( self : str , __UpperCAmelCase : int , __UpperCAmelCase : list[int] , __UpperCAmelCase : deque[Process] , __UpperCAmelCase : int , ): '''simple docstring''' _A = number_of_queues # time slice of queues that round robin algorithm applied _A = time_slices # unfinished process is in this ready_queue _A = queue # current time _A = current_time # finished process is in this sequence queue _A = deque() def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def lowerCAmelCase ( self : Any , __UpperCAmelCase : list[Process] ): '''simple docstring''' _A = [] for i in range(len(__UpperCAmelCase ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : list[Process] ): '''simple docstring''' _A = [] for i in range(len(__UpperCAmelCase ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def lowerCAmelCase ( self : Dict , __UpperCAmelCase : list[Process] ): '''simple docstring''' _A = [] for i in range(len(__UpperCAmelCase ) ): completion_times.append(queue[i].stop_time ) return completion_times def lowerCAmelCase ( self : Any , __UpperCAmelCase : deque[Process] ): '''simple docstring''' return [q.burst_time for q in queue] def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Process ): '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : deque[Process] ): '''simple docstring''' _A = deque() # sequence deque of finished process while len(__UpperCAmelCase ) != 0: _A = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(__UpperCAmelCase ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 _A = 0 # set the process's turnaround time because it is finished _A = self.current_time - cp.arrival_time # set the completion time _A = self.current_time # add the process to queue that has finished queue finished.append(__UpperCAmelCase ) self.finish_queue.extend(__UpperCAmelCase ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : deque[Process] , __UpperCAmelCase : int ): '''simple docstring''' _A = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(__UpperCAmelCase ) ): _A = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: 
self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(__UpperCAmelCase ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time _A = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(__UpperCAmelCase ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished _A = 0 # set the finish time _A = self.current_time # update the process' turnaround time because it is finished _A = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(__UpperCAmelCase ) self.finish_queue.extend(__UpperCAmelCase ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def lowerCAmelCase ( self : str ): '''simple docstring''' for i in range(self.number_of_queues - 1 ): _A , _A = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest lowerCamelCase_ = Process('''P1''', 0, 53) lowerCamelCase_ = Process('''P2''', 0, 17) lowerCamelCase_ = Process('''P3''', 0, 68) lowerCamelCase_ = Process('''P4''', 0, 24) lowerCamelCase_ = 3 lowerCamelCase_ = [17, 25] lowerCamelCase_ = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) lowerCamelCase_ = Process('''P1''', 0, 53) lowerCamelCase_ = Process('''P2''', 0, 17) lowerCamelCase_ = Process('''P3''', 0, 68) lowerCamelCase_ = Process('''P4''', 0, 24) lowerCamelCase_ = 3 lowerCamelCase_ = [17, 25] lowerCamelCase_ = deque([Pa, Pa, Pa, Pa]) lowerCamelCase_ = MLFQ(number_of_queues, time_slices, queue, 0) lowerCamelCase_ = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( F"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( F"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( F"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
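The round-robin step at the heart of the class can be sanity-checked in isolation. A minimal sketch with (name, remaining_burst) tuples standing in for Process objects; the time slice of 17 matches the first queue of the demo above.

from collections import deque

def round_robin_once(bursts, time_slice):
    # One pass over the ready queue: run each job for at most time_slice,
    # finishing it or sending it to the back with its remaining burst.
    queue, now, finished = deque(bursts), 0, []
    for _ in range(len(bursts)):
        name, remaining = queue.popleft()
        run = min(remaining, time_slice)
        now += run
        if remaining > run:
            queue.append((name, remaining - run))
        else:
            finished.append((name, now))
    return finished, list(queue)

print(round_robin_once([("P1", 53), ("P2", 17)], 17))
# -> ([('P2', 34)], [('P1', 36)])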
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCamelCase_ = logging.getLogger(__name__) def __lowercase ( __lowercase , __lowercase ) -> Optional[int]: '''simple docstring''' if os.path.exists(__lowercase ): if os.path.exists(os.path.join(__lowercase , "config.json" ) ) and os.path.isfile( os.path.join(__lowercase , "config.json" ) ): os.remove(os.path.join(__lowercase , "config.json" ) ) if os.path.exists(os.path.join(__lowercase , "pytorch_model.bin" ) ) and os.path.isfile( os.path.join(__lowercase , "pytorch_model.bin" ) ): os.remove(os.path.join(__lowercase , "pytorch_model.bin" ) ) else: os.makedirs(__lowercase ) model.save_pretrained(__lowercase ) def __lowercase ( __lowercase , __lowercase=False ) -> Optional[int]: '''simple docstring''' _A = 2 if unlogit: _A = torch.pow(__lowercase , __lowercase ) _A = p * torch.log(__lowercase ) _A = 0 return -plogp.sum(dim=-1 ) def __lowercase ( __lowercase ) -> Optional[Any]: '''simple docstring''' logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(__lowercase ) ) ) ) for row in range(len(__lowercase ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase=False ) -> int: '''simple docstring''' _A , _A = model.config.num_hidden_layers, model.config.num_attention_heads _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) _A = torch.zeros(__lowercase , __lowercase ).to(args.device ) if head_mask is None: _A = torch.ones(__lowercase , __lowercase ).to(args.device ) head_mask.requires_grad_(requires_grad=__lowercase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _A = None _A = 0.0 _A = 0.0 for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ): _A = tuple(t.to(args.device ) for t in inputs ) ((_A) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _A = model(__lowercase , labels=__lowercase , head_mask=__lowercase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _A , _A , _A = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(__lowercase ): _A = entropy(attn.detach() , __lowercase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _A = 2 _A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: _A = (head_importance - head_importance.min()) 
/ (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("Attention entropies" ) print_ad_tensor(__lowercase ) if compute_importance: logger.info("Head importance scores" ) print_ad_tensor(__lowercase ) logger.info("Head ranked by importance scores" ) _A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _A = torch.arange( head_importance.numel() , device=args.device ) _A = head_ranks.view_as(__lowercase ) print_ad_tensor(__lowercase ) return attn_entropy, head_importance, total_loss def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase ) _A = 1 / loss # instead of downsteam score use the LM loss logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold ) _A = torch.ones_like(__lowercase ) _A = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _A = original_score while current_score >= original_score * args.masking_threshold: _A = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _A = float("Inf" ) _A = head_importance.view(-1 ).sort()[1] if len(__lowercase ) <= num_to_mask: print("BREAK BY num_to_mask" ) break # mask heads _A = current_heads_to_mask[:num_to_mask] logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) ) _A = new_head_mask.view(-1 ) _A = 0.0 _A = new_head_mask.view_as(__lowercase ) _A = new_head_mask.clone().detach() print_ad_tensor(__lowercase ) # Compute metric and head importance again _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase ) _A = 1 / loss logger.info( "Masking: current score: %f, remaining heads %d (%.1f percents)" , __lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("Final head mask" ) print_ad_tensor(__lowercase ) np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() ) return head_mask def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]: '''simple docstring''' _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase ) _A = 1 / loss _A = datetime.now() - before_time _A = sum(p.numel() for p in model.parameters() ) _A = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) ) } for k, v in heads_to_prune.items(): if isinstance(__lowercase , __lowercase ): _A = [ v, ] assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(__lowercase ) _A = sum(p.numel() for p in model.parameters() ) _A = datetime.now() _A , _A , _A = compute_heads_importance( __lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase , actually_pruned=__lowercase , ) _A = 1 / loss _A = datetime.now() - before_time logger.info( "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __lowercase , __lowercase , pruned_num_params / original_num_params * 100 , ) logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase ) logger.info("Pruning: speed 
ratio (original timing / new timing): %f percents" , original_time / new_time * 100 ) save_model(__lowercase , args.output_dir ) def __lowercase ( ) -> Union[str, Any]: '''simple docstring''' _A = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , ) parser.add_argument( "--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , ) # Other parameters parser.add_argument( "--config_name" , default="" , type=__lowercase , help="Pretrained config name or path if not the same as model_name_or_path" , ) parser.add_argument( "--tokenizer_name" , default="" , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , ) parser.add_argument( "--cache_dir" , default=__lowercase , type=__lowercase , help="Where do you want to store the pre-trained models downloaded from s3" , ) parser.add_argument( "--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." ) parser.add_argument( "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" ) parser.add_argument( "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , ) parser.add_argument( "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." ) parser.add_argument( "--masking_threshold" , default=0.9 , type=__lowercase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , ) parser.add_argument( "--masking_amount" , default=0.1 , type=__lowercase , help="Amount to heads to masking at each masking step." ) parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." ) parser.add_argument( "--max_seq_length" , default=128 , type=__lowercase , help=( "The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, sequences shorter padded." ) , ) parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." ) parser.add_argument("--seed" , type=__lowercase , default=42 ) parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" ) parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" ) parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." ) parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." 
) _A = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" ) _A = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _A = torch.device("cuda" , args.local_rank ) _A = 1 torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _A = nn.parallel.DistributedDataParallel( __lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowercase ) elif args.n_gpu > 1: _A = nn.DataParallel(__lowercase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=__lowercase ) torch.save(__lowercase , os.path.join(args.output_dir , "run_args.bin" ) ) logger.info("Training/evaluation parameters %s" , __lowercase ) # Prepare dataset _A = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _A = (torch.from_numpy(__lowercase ),) _A = TensorDataset(*__lowercase ) _A = RandomSampler(__lowercase ) _A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(__lowercase , __lowercase , __lowercase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _A = mask_heads(__lowercase , __lowercase , __lowercase ) prune_heads(__lowercase , __lowercase , __lowercase , __lowercase ) if __name__ == "__main__": main()
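The importance score accumulated by compute_heads_importance is the absolute gradient of the loss with respect to a per-head mask. A toy sketch of that idea, with a scalar product standing in for the transformer forward pass:

import torch

head_mask = torch.ones(2, requires_grad=True)  # one mask entry per attention head
head_outputs = torch.tensor([3.0, 0.5])  # stand-in contributions of each head
loss = (head_mask * head_outputs).sum()
loss.backward()
print(head_mask.grad.abs())  # tensor([3.0000, 0.5000]): masking head 0 would hurt more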
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    '''simple docstring'''
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])
        # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
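Quick checks of the three helpers; note the arithmetic shift replicates the sign bit of the two's-complement encoding:

assert logical_left_shift(5, 2) == "0b10100"  # 5 << 2 == 20
assert logical_right_shift(20, 2) == "0b101"  # 20 >> 2 == 5
assert arithmetic_right_shift(-8, 2) == "0b11110"  # -8 >> 2 == -2 in 5-bit two's complement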
= "CompVis/stable-diffusion-v1-4" _A = DDIMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler" ) _A = CycleDiffusionPipeline.from_pretrained(__UpperCAmelCase , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _A = "A black colored car" _A = "A blue colored car" _A = torch.manual_seed(0 ) _A = pipe( prompt=__UpperCAmelCase , source_prompt=__UpperCAmelCase , image=__UpperCAmelCase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=__UpperCAmelCase , output_type="np" , ) _A = output.images assert np.abs(image - expected_image ).max() < 2E-2
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(n: int = 1000) -> int:
    """Sum the series 2 * a * ((a - 1) // 2) for a from 3 to n inclusive."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
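# Added sanity check (illustrative): expanding the series by hand for
# n = 10 gives 6 + 8 + 20 + 24 + 42 + 48 + 72 + 80 = 300.
assert solution(10) == 300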
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class _UpperCAmelCase : """simple docstring""" snake_case = '''dummy_data''' snake_case = '''datasets''' snake_case = False def __init__( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Union[Version, str] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[List[Callable]] = None , ): '''simple docstring''' _A = 0 _A = dataset_name _A = cache_dir _A = use_local_dummy_data _A = config # download_callbacks take a single url as input _A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _A = str(__UpperCAmelCase ) # to be downloaded _A = None _A = None @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' if self._dummy_file is None: _A = self.download_dummy_data() return self._dummy_file @property def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _A = cached_path( __UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase ) return os.path.join(__UpperCAmelCase , self.dummy_file_name ) @property def lowerCAmelCase ( self : List[str] ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowerCAmelCase ( self : int ): '''simple docstring''' if self._bucket_url is None: _A = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def lowerCAmelCase ( self : str ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Optional[Any] , *__UpperCAmelCase : Dict ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested _A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _A = self.dummy_file_name # special case when data_url is a dict if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , (list, tuple) ): return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase ) else: return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : Any ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Optional[int] , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : List[str] ): '''simple docstring''' return path def lowerCAmelCase ( self : str ): '''simple docstring''' return {} def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): for single_url in single_urls: download_callback(__UpperCAmelCase ) else: _A = single_urls download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls] else: _A = single_urls _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) _A = value # make sure that values are unique if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' _A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _A = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __UpperCAmelCase ) ) for url in data_url ) _A = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _A = [data_url[0]] * len(__UpperCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__UpperCAmelCase ) return dummy_data_list def 
lowerCAmelCase ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' pass def lowerCAmelCase ( self : Dict ): '''simple docstring''' pass def lowerCAmelCase ( self : Any , __UpperCAmelCase : Optional[Any] ): '''simple docstring''' def _iter_archive_members(__UpperCAmelCase : List[Any] ): # this preserves the order of the members inside the ZIP archive _A = Path(self.dummy_file ).parent _A = path.relative_to(__UpperCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__UpperCAmelCase ) _A = Path(__UpperCAmelCase ) _A = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open("rb" ) def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = [paths] for path in paths: if os.path.isfile(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__UpperCAmelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """Count the hollow square laminae that can be formed using up to
    `limit` tiles (one lamina per valid outer-width / hole-width pair)."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole and the outer square must have the same parity
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
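# Added regression check (illustrative), assuming this is the Project Euler
# 173 solution it appears to be: that problem's statement says up to one
# hundred tiles form forty-one distinct square laminae.
assert solution(100) == 41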
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Jaccard similarity: |A ∩ B| / |A ∪ B|.

    With alternative_union=True the denominator is |A| + |B| instead of the
    size of the true union.  Accepts sets, lists, or tuples; returns None
    for unsupported input types.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
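# Added example (illustrative): with alternative_union=True the denominator
# is |A| + |B| = 11 rather than |A ∪ B| = 8, so the three shared elements
# give 3 / 11 instead of 3 / 8.
example_a = {"a", "b", "c", "d", "e"}
example_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(example_a, example_b, alternative_union=True))  # 0.2727...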
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the minimum number of perfect squares that sum to `number`,
    computed by dynamic programming (Lagrange's four-square theorem
    guarantees the answer is at most four)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1

    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
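# Added hand-checked examples (illustrative) for the DP above:
# 12 = 4 + 4 + 4 needs three squares, 25 = 5**2 needs one.
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(25) == 1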
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings lowerCamelCase : Tuple = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n' @add_start_docstrings(lowercase_ ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Any = """rag""" lowerCAmelCase__ : List[Any] = True def __init__(self : Dict , UpperCamelCase : List[Any]=None , UpperCamelCase : str=True , UpperCamelCase : List[Any]=None , UpperCamelCase : List[str]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : str=None , UpperCamelCase : List[Any]=None , UpperCamelCase : str=" / " , UpperCamelCase : Union[str, Any]=" // " , UpperCamelCase : List[str]=5 , UpperCamelCase : Tuple=300 , UpperCamelCase : Optional[int]=768 , UpperCamelCase : int=8 , UpperCamelCase : str="wiki_dpr" , UpperCamelCase : Optional[Any]="train" , UpperCamelCase : Any="compressed" , UpperCamelCase : Dict=None , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=False , UpperCamelCase : str=False , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : str=True , UpperCamelCase : int=False , UpperCamelCase : Any=False , UpperCamelCase : Any=False , UpperCamelCase : List[str]=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : List[Any] , ): '''simple docstring''' super().__init__( bos_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , prefix=UpperCamelCase , vocab_size=UpperCamelCase , **UpperCamelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowercase__ = kwargs.pop('''question_encoder''' ) lowercase__ = question_encoder_config.pop('''model_type''' ) lowercase__ = kwargs.pop('''generator''' ) lowercase__ = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowercase__ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase ) lowercase__ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase ) lowercase__ = reduce_loss lowercase__ = label_smoothing lowercase__ = exclude_bos_score lowercase__ = do_marginalize lowercase__ = title_sep lowercase__ = doc_sep lowercase__ = n_docs lowercase__ = max_combined_length lowercase__ = dataset lowercase__ = dataset_split lowercase__ = index_name lowercase__ = retrieval_vector_size lowercase__ = retrieval_batch_size lowercase__ = passages_path lowercase__ = index_path lowercase__ = use_dummy_dataset lowercase__ = output_retrieved lowercase__ = do_deduplication lowercase__ = use_cache if self.forced_eos_token_id is None: lowercase__ = getattr(self.generator , '''forced_eos_token_id''' , UpperCamelCase ) @classmethod def UpperCamelCase__ (cls : Optional[int] , UpperCamelCase : PretrainedConfig , UpperCamelCase : PretrainedConfig , **UpperCamelCase : int ): '''simple docstring''' return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = copy.deepcopy(self.__dict__ ) lowercase__ = self.question_encoder.to_dict() lowercase__ = self.generator.to_dict() lowercase__ = self.__class__.model_type return output
def solution(n: int = 100) -> int:
    """Project Euler 6: return the difference between the square of the sum
    and the sum of the squares of the first `n` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
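# Added quick check (illustrative): for the first ten natural numbers the
# sum of squares is 385 and the square of the sum is 55**2 = 3025,
# so the difference is 3025 - 385 = 2640.
assert solution(10) == 2640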
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of CLIP image embeddings and
    normalizes (scale) or de-normalizes (unscale) embeddings with them."""

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
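# Added minimal usage sketch (illustrative; assumes torch and diffusers are
# installed). With the freshly initialised zero mean and unit std,
# unscale(scale(x)) round-trips to the input.
import torch

normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(2, 768)
roundtrip = normalizer.unscale(normalizer.scale(embeds))
assert torch.allclose(embeds, roundtrip, atol=1e-6)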
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') lowerCamelCase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) snake_case = field( default=snake_case_ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the training data.'''} ) snake_case = field(default=snake_case_ , metadata={'''help''': '''A folder containing the validation data.'''} ) snake_case = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) snake_case = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) snake_case = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class _UpperCAmelCase : """simple docstring""" snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) snake_case = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) snake_case = field(default=snake_case_ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) snake_case = field( default=snake_case_ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : Optional[int]=192 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : int=4 , __UpperCAmelCase : int=0.6 ): '''simple docstring''' _A = input_size _A = mask_patch_size _A = model_patch_size _A = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError("Input size must be divisible by mask patch size" ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError("Mask patch size must be divisible by model patch size" ) _A = self.input_size // self.mask_patch_size _A = self.mask_patch_size // self.model_patch_size _A = self.rand_size**2 _A = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : Any ): '''simple docstring''' _A = np.random.permutation(self.token_count )[: self.mask_count] _A = np.zeros(self.token_count , dtype=__UpperCAmelCase ) _A = 1 _A = mask.reshape((self.rand_size, self.rand_size) ) _A = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowercase ( __lowercase ) -> str: '''simple docstring''' _A = torch.stack([example["pixel_values"] for example in examples] ) _A = torch.stack([example["mask"] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowercase ( ) -> Dict: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mim" , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: _A = ds["train"].train_test_split(data_args.train_val_split ) _A = split["train"] _A = split["test"] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name_or_path: _A = AutoConfig.from_pretrained(model_args.config_name_or_path , **__lowercase ) elif model_args.model_name_or_path: _A = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(__lowercase , "decoder_type" ): _A = "simmim" # adapt config _A = model_args.image_size if model_args.image_size is not None else config.image_size _A = model_args.patch_size if model_args.patch_size is not None else config.patch_size _A = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { "image_size": model_args.image_size, "patch_size": model_args.patch_size, "encoder_stride": model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: _A = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **__lowercase ) elif model_args.model_name_or_path: _A = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: _A = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } _A = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: _A = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _A = AutoModelForMaskedImageModeling.from_config(__lowercase ) if training_args.do_train: _A = ds["train"].column_names else: _A = ds["validation"].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = "image" elif "img" in column_names: _A = "img" else: _A = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py _A = Compose( [ Lambda(lambda __lowercase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator _A = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(__lowercase ): _A = [transforms(__lowercase ) for image in examples[image_column_name]] _A = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _A = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _A = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__lowercase ) # Initialize our trainer _A = Trainer( model=__lowercase , args=__lowercase , 
train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics("eval" , __lowercase ) trainer.save_metrics("eval" , __lowercase ) # Write model card and (optionally) push to hub _A = { "finetuned_from": model_args.model_name_or_path, "tasks": "masked-image-modeling", "dataset": data_args.dataset_name, "tags": ["masked-image-modeling"], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class UpperCAmelCase_ : lowerCamelCase : Union[str, Any] = LEDConfig lowerCamelCase : Tuple = {} lowerCamelCase : Any = '''gelu''' def __init__( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict=1_3 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Optional[int]=9_9 , UpperCAmelCase__ : Tuple=3_2 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : str=3_7 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Dict=2_0 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : str=4 , ) -> Optional[Any]: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = eos_token_id lowerCAmelCase = pad_token_id lowerCAmelCase = bos_token_id lowerCAmelCase = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after lowerCAmelCase = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests lowerCAmelCase = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def __UpperCAmelCase ( self : Union[str, Any] ) -> int: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) lowerCAmelCase = prepare_led_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = tf.concat( [tf.zeros_like(UpperCAmelCase__ )[:, :-1], tf.ones_like(UpperCAmelCase__ )[:, -1:]] , axis=-1 , ) lowerCAmelCase = global_attention_mask return config, inputs_dict def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]: lowerCAmelCase = TFLEDModel(config=UpperCAmelCase__ ).get_decoder() lowerCAmelCase = inputs_dict['input_ids'] lowerCAmelCase = input_ids[:1, :] lowerCAmelCase = inputs_dict['attention_mask'][:1, :] lowerCAmelCase = 1 # first forward pass lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ ) lowerCAmelCase , lowerCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0] lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-3 ) def a_ ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : List[Any]=None , lowerCamelCase : Any=None , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : List[str]=None , ): if attention_mask is None: lowerCAmelCase = tf.cast(tf.math.not_equal(lowerCamelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), 
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ): lowerCamelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () lowerCamelCase : List[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else () lowerCamelCase : str = ( { '''conversational''': TFLEDForConditionalGeneration, '''feature-extraction''': TFLEDModel, '''summarization''': TFLEDForConditionalGeneration, '''text2text-generation''': TFLEDForConditionalGeneration, '''translation''': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) lowerCamelCase : Dict = True lowerCamelCase : List[Any] = False lowerCamelCase : List[Any] = False lowerCamelCase : List[str] = False def __UpperCAmelCase ( self : Any ) -> Dict: lowerCAmelCase = TFLEDModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ ) def __UpperCAmelCase ( self : Union[str, Any] ) -> int: self.config_tester.run_common_tests() def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase__ ) def __UpperCAmelCase ( self : Dict ) -> Tuple: lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = tf.zeros_like(inputs_dict['attention_mask'] ) lowerCAmelCase = 2 lowerCAmelCase = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , ) lowerCAmelCase = True lowerCAmelCase = self.model_tester.seq_length lowerCAmelCase = self.model_tester.encoder_seq_length def check_decoder_attentions_output(UpperCAmelCase__ : Any ): lowerCAmelCase = outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(UpperCAmelCase__ : Optional[Any] ): lowerCAmelCase = [t.numpy() for t in outputs.encoder_attentions] lowerCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: lowerCAmelCase = True lowerCAmelCase = False lowerCAmelCase = False lowerCAmelCase = model_class(UpperCAmelCase__ ) lowerCAmelCase = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) lowerCAmelCase = len(UpperCAmelCase__ ) self.assertEqual(config.output_hidden_states , UpperCAmelCase__ ) 
check_encoder_attentions_output(UpperCAmelCase__ ) if self.is_encoder_decoder: lowerCAmelCase = model_class(UpperCAmelCase__ ) lowerCAmelCase = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase__ ) check_decoder_attentions_output(UpperCAmelCase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCAmelCase = True lowerCAmelCase = model_class(UpperCAmelCase__ ) lowerCAmelCase = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase__ ) check_encoder_attentions_output(UpperCAmelCase__ ) # Check attention is always last and order is fine lowerCAmelCase = True lowerCAmelCase = True lowerCAmelCase = model_class(UpperCAmelCase__ ) lowerCAmelCase = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase__ ) ) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase__ ) check_encoder_attentions_output(UpperCAmelCase__ ) @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' ) def __UpperCAmelCase ( self : int ) -> List[Any]: pass def __UpperCAmelCase ( self : int ) -> Any: # TODO: Head-masking not yet implement pass def a_ ( lowerCamelCase : Any ): return tf.constant(lowerCamelCase , dtype=tf.intaa ) __snake_case =1e-4 @slow @require_tf class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: lowerCAmelCase = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led # change to intended input here lowerCAmelCase = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowerCAmelCase = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowerCAmelCase = prepare_led_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = model(**UpperCAmelCase__ )[0] lowerCAmelCase = (1, 1_0_2_4, 7_6_8) self.assertEqual(output.shape , UpperCAmelCase__ ) # change to expected output here lowerCAmelCase = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-3 ) def __UpperCAmelCase ( self : List[Any] ) -> Tuple: lowerCAmelCase = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ) # change to intended input here lowerCAmelCase = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowerCAmelCase = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowerCAmelCase = prepare_led_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = model(**UpperCAmelCase__ )[0] lowerCAmelCase = (1, 1_0_2_4, model.config.vocab_size) self.assertEqual(output.shape , UpperCAmelCase__ ) # change to expected output here lowerCAmelCase = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-3 , rtol=1E-3 )
4
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
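A brief usage sketch (not part of the config file itself); CanineConfig is exported from the top-level transformers package, and the values below just echo the defaults above:

from transformers import CanineConfig

config = CanineConfig()
assert config.num_hash_buckets == 16384 and config.downsampling_rate == 4
# any default can be overridden at construction time
small_config = CanineConfig(hidden_size=128, num_hidden_layers=2)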
79
0
import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__ ( lowerCAmelCase): def __init__(self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase="None" , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ) -> Any: _lowercase =parent _lowercase =batch_size _lowercase =seq_length _lowercase =is_training _lowercase =use_input_mask _lowercase =use_token_type_ids _lowercase =use_labels _lowercase =vocab_size _lowercase =hidden_size _lowercase =num_hidden_layers _lowercase =num_attention_heads _lowercase =intermediate_size _lowercase =hidden_act _lowercase =hidden_dropout_prob _lowercase =attention_probs_dropout_prob _lowercase =max_position_embeddings _lowercase =type_vocab_size _lowercase =type_sequence_label_size _lowercase =initializer_range _lowercase =num_labels _lowercase =num_choices _lowercase =relative_attention _lowercase =position_biased_input _lowercase =pos_att_type _lowercase =scope def __A (self ) -> Any: _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase =None if self.use_input_mask: _lowercase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _lowercase =None if self.use_token_type_ids: _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase =None _lowercase =None _lowercase =None if self.use_labels: _lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase =ids_tensor([self.batch_size] , self.num_choices ) _lowercase =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A (self ) -> Optional[int]: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __A (self ) -> List[Any]: _lowercase =self.get_config() _lowercase =3_0_0 return config def __A (self , UpperCAmelCase ) -> Union[str, Any]: 
self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]: _lowercase =DebertaModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )[0] _lowercase =model(UpperCAmelCase , token_type_ids=UpperCAmelCase )[0] _lowercase =model(UpperCAmelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: _lowercase =DebertaForMaskedLM(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]: _lowercase =self.num_labels _lowercase =DebertaForSequenceClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(UpperCAmelCase ) def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: _lowercase =self.num_labels _lowercase =DebertaForTokenClassification(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: _lowercase =DebertaForQuestionAnswering(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() _lowercase =model( UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A (self ) -> Any: _lowercase =self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) =config_and_inputs _lowercase ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): SCREAMING_SNAKE_CASE__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': 
DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def __A (self ) -> List[str]: _lowercase =DebertaModelTester(self ) _lowercase =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=3_7 ) def __A (self ) -> Dict: self.config_tester.run_common_tests() def __A (self ) -> Optional[int]: _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*UpperCAmelCase ) def __A (self ) -> Tuple: _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCAmelCase ) def __A (self ) -> List[str]: _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCAmelCase ) def __A (self ) -> int: _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*UpperCAmelCase ) def __A (self ) -> str: _lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*UpperCAmelCase ) @slow def __A (self ) -> Optional[int]: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase =DebertaModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( unittest.TestCase): @unittest.skip(reason='''Model not available yet''' ) def __A (self ) -> Optional[Any]: pass @slow def __A (self ) -> Any: _lowercase =DebertaModel.from_pretrained('''microsoft/deberta-base''' ) _lowercase =torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) _lowercase =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _lowercase =model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0] # compare the actual values for a slice. _lowercase =torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1e-4 ) , f"{output[:, 1:4, 1:4]}" )
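A minimal forward-pass sketch mirroring what the tester above checks, with a tiny hypothetical config so it runs quickly on CPU:

import torch
from transformers import DebertaConfig, DebertaModel

config = DebertaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
model = DebertaModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))
with torch.no_grad():
    sequence_output = model(input_ids)[0]
assert sequence_output.shape == (2, 7, config.hidden_size)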
5
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
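Usage example (values verifiable by hand against the class above):

ps = PrefixSum([1, 2, 3, 4])
assert ps.prefix_sum == [1, 3, 6, 10]
assert ps.get_sum(1, 3) == 9      # 2 + 3 + 4
assert ps.contains_sum(7)         # the subarray [3, 4] sums to 7
assert not ps.contains_sum(100)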
79
0
import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json A : List[str] = 'sshleifer/mar_enro_6_3_student' class __A( a ): def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]: '''simple docstring''' super().setUp() __a = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_snake_case , ) __a = F"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k""" @slow @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple: '''simple docstring''' MarianMTModel.from_pretrained(_snake_case ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]: '''simple docstring''' __a = { '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script __a = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() __a = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) for k, v in env_vars_to_replace.items(): __a = bash_script.replace(_snake_case , str(_snake_case ) ) __a = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") __a = F""" --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 """.split() # XXX: args.gpus > 1 : handle multi_gpu in the future __a = ['''finetune.py'''] + bash_script.split() + args with patch.object(_snake_case , '''argv''' , _snake_case ): __a = argparse.ArgumentParser() __a = pl.Trainer.add_argparse_args(_snake_case ) __a = SummarizationModule.add_model_specific_args(_snake_case , os.getcwd() ) __a = parser.parse_args() __a = main(_snake_case ) # Check metrics __a = load_json(model.metrics_save_path ) __a = metrics['''val'''][0] __a = metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , _snake_case ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict __a = os.listdir(_snake_case ) __a = [x for x in contents if x.endswith('''.ckpt''' )][0] __a = os.path.join(args.output_dir , _snake_case ) __a = torch.load(_snake_case , map_location='''cpu''' ) __a = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: __a = {os.path.basename(_snake_case ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class __A( a ): @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self ) -> Dict: '''simple docstring''' __a = F"""{self.test_file_dir_str}/test_data/wmt_en_ro""" __a = { '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 128, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script __a = ( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) __a = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) __a = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): __a = bash_script.replace(_snake_case , str(_snake_case ) ) __a = self.get_auto_remove_tmp_dir() __a = bash_script.replace('''--fp16''' , '''''' ) __a = 6 __a = ( ['''distillation.py'''] + bash_script.split() + [ F"""--output_dir={output_dir}""", '''--gpus=1''', '''--learning_rate=1e-3''', F"""--num_train_epochs={epochs}""", '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(_snake_case , '''argv''' , _snake_case ): __a = argparse.ArgumentParser() __a = pl.Trainer.add_argparse_args(_snake_case ) __a = SummarizationDistiller.add_model_specific_args(_snake_case , os.getcwd() ) __a = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu __a = distill_main(_snake_case ) # Check metrics __a = load_json(model.metrics_save_path ) __a = metrics['''val'''][0] __a = metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , _snake_case ) # check lightning ckpt can be loaded and has a reasonable statedict __a = os.listdir(_snake_case ) __a = [x for x in contents if x.endswith('''.ckpt''' )][0] __a = os.path.join(args.output_dir , _snake_case ) __a = torch.load(_snake_case , map_location='''cpu''' ) __a = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. 
if args.do_predict: __a = {os.path.basename(_snake_case ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
6
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    # Having lists of different sizes makes sharding ambiguous, so raise an error in that case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Use a dedicated random generator to avoid touching the global one
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
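A worked example of the helpers above (outputs verifiable by hand):

gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt"], "split": "train"}
assert _number_of_shards_in_gen_kwargs(gen_kwargs) == 4
# 4 shards over 3 jobs: the remainder goes to the first group
assert _distribute_shards(num_shards=4, max_num_jobs=3) == [range(0, 2), range(2, 3), range(3, 4)]
jobs = _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
assert jobs[0]["files"] == ["a.txt", "b.txt"] and jobs[1]["files"] == ["c.txt", "d.txt"]
assert jobs[0]["split"] == "train"  # non-list values are copied to every job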
79
0
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
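The detection logic reduces to a per-image threshold test; a toy sketch with illustrative numbers:

import torch

logits = torch.tensor([0.9, 0.2, 0.7])   # hypothetical p_head outputs, one per image
flags = (logits > 0.5).tolist()          # [True, False, True]
# images whose flag is True are replaced with all-zero (black) arrays above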
7
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
0
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase_ = '''hf-internal-testing/tiny-random-bert''' lowerCAmelCase_ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') lowerCAmelCase_ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any ) ->List[Any]: snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCamelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCamelCase , _UpperCamelCase ) ) ) with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f: snake_case_ = f.read() self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) ) self.assertTrue(os.path.isfile(_UpperCamelCase ) ) # File is cached at the same place the second time. snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) # Using a specific revision to test the full commit hash. snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''9b8c223''' ) self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) ) def snake_case__( self : Tuple ) ->Optional[int]: with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ): snake_case_ = cached_file('''tiny-random-bert''' , _UpperCamelCase ) with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ): snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''aaaa''' ) with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ): snake_case_ = cached_file(_UpperCamelCase , '''conf''' ) def snake_case__( self : Optional[int] ) ->int: with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ): snake_case_ = cached_file(_UpperCamelCase , '''conf''' ) with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f: snake_case_ = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCamelCase , '''.no_exist''' , _UpperCamelCase , '''conf''' ) ) ) snake_case_ = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) snake_case_ = cached_file(_UpperCamelCase , '''conf''' , local_files_only=_UpperCamelCase , _raise_exceptions_for_missing_entries=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) snake_case_ = mock.Mock() snake_case_ = 5_0_0 snake_case_ = {} snake_case_ = HTTPError snake_case_ = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=_UpperCamelCase ) as mock_head: snake_case_ = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) # This check we did call the fake head request mock_head.assert_called() def snake_case__( self : Dict ) ->Optional[int]: self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) def snake_case__( self : Optional[int] ) ->str: # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _UpperCamelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _UpperCamelCase , revision='''ahaha''' ) snake_case_ = get_file_from_repo('''bert-base-cased''' , _UpperCamelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. snake_case_ = json.loads(open(_UpperCamelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_6_8 ) def snake_case__( self : Optional[Any] ) ->Any: with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = Path(_UpperCamelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_UpperCamelCase , '''a.txt''' ) , str(_UpperCamelCase ) ) self.assertIsNone(get_file_from_repo(_UpperCamelCase , '''b.txt''' ) )
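For orientation, a hedged usage sketch of the API under test (uses the same tiny repo the tests pin; requires network access or a warm local cache):

from transformers.utils import cached_file

resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(resolved)  # a path inside the local HF cache, under a snapshot for the resolved commit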
8
'''simple docstring''' from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None ): '''simple docstring''' super().__init__() _A = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" _A = torch.zeros(__UpperCAmelCase , __UpperCAmelCase ) else: _A = None _A = torch.nn.Parameter(__UpperCAmelCase ) class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 snake_case = 42 def __init__( self : Any , __UpperCAmelCase : VQModel , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : TransformeraDModel , __UpperCAmelCase : VQDiffusionScheduler , __UpperCAmelCase : LearnedClassifierFreeSamplingEmbeddings , ): '''simple docstring''' super().__init__() self.register_modules( vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ): '''simple docstring''' _A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1 # get prompt text embeddings _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) _A = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: _A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) _A = text_input_ids[:, : self.tokenizer.model_max_length] _A = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 _A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate text embeddings for each generation per prompt _A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: _A = self.learned_classifier_free_sampling_embeddings.embeddings _A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 ) else: _A = [""] * batch_size _A = text_input_ids.shape[-1] _A = self.tokenizer( __UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , ) _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings _A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method _A = negative_prompt_embeds.shape[1] _A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 ) _A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _A = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ): '''simple docstring''' if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = 1 elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): _A = len(__UpperCAmelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' ) _A = batch_size * num_images_per_prompt _A = guidance_scale > 1.0 _A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(__UpperCAmelCase )}.''' ) # get the initial completely masked latents unless the user supplied it _A = (batch_size, self.transformer.num_latent_pixels) if latents is None: _A = self.transformer.num_vector_embeds - 1 _A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( "Unexpected latents value(s). All latents be valid embedding indices i.e. 
in the range 0," f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' ) _A = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device ) _A = self.scheduler.timesteps.to(self.device ) _A = latents for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ): # expand the sample if we are doing classifier free guidance _A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` _A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample if do_classifier_free_guidance: _A , _A = model_output.chunk(2 ) _A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase ) _A = self.truncate(__UpperCAmelCase , __UpperCAmelCase ) # remove `log(0)`'s (`-inf`s) _A = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = self.vqvae.config.vq_embed_dim _A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) _A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase ) _A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample _A = (image / 2 + 0.5).clamp(0 , 1 ) _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ): '''simple docstring''' _A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase ) _A = torch.exp(__UpperCAmelCase ) _A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out _A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase ) _A = torch.cat((all_true, keep_mask) , dim=1 ) _A = keep_mask[:, :-1, :] _A = keep_mask.gather(1 , indices.argsort(1 ) ) _A = log_p_x_0.clone() _A = -torch.inf # -inf = log(0) return rv
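A toy walk-through (illustrative probabilities) of the mask built by `truncate` above: a class is kept while the cumulative probability of strictly more likely classes stays below the truncation rate, and the mask is then scattered back to the original class order:

import torch

log_p_x_0 = torch.log(torch.tensor([[[0.05], [0.70], [0.25]]]))  # (batch, classes, pixels)
sorted_log_p, indices = torch.sort(log_p_x_0, 1, descending=True)
keep_mask = torch.exp(sorted_log_p).cumsum(dim=1) < 0.9          # truncation_rate = 0.9
all_true = torch.full_like(keep_mask[:, 0:1, :], True)
keep_mask = torch.cat((all_true, keep_mask), dim=1)[:, :-1, :]   # always keep the argmax
keep_mask = keep_mask.gather(1, indices.argsort(1))              # back to original order
assert keep_mask.flatten().tolist() == [False, True, True]       # 0.70 and 0.25 survive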
79
0
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                hidden_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
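A toy sketch of the routing and mixing above, with random tensors standing in for the two transformers' residual-branch outputs:

import torch

hidden_states = torch.randn(1, 4, 8)
encoder_hidden_states = torch.randn(1, 77 + 257, 8)
text_condition = encoder_hidden_states[:, 0:77]            # routed to transformers[1]
image_condition = encoder_hidden_states[:, 77 : 77 + 257]  # routed to transformers[0]
encoded_0 = torch.randn_like(hidden_states)  # stand-in for (transformer output - input)
encoded_1 = torch.randn_like(hidden_states)
mix_ratio = 0.5
output_states = encoded_0 * mix_ratio + encoded_1 * (1 - mix_ratio) + hidden_states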
9
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def __lowercase ( __lowercase , __lowercase=False ) -> int: '''simple docstring''' _A = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) 
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _A = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: _A = "" else: _A = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _A = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[ : config.hidden_size, : ] _A = in_proj_bias[: config.hidden_size] _A = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _A = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _A = in_proj_weight[ -config.hidden_size :, : ] _A = in_proj_bias[-config.hidden_size :] def __lowercase ( __lowercase ) -> List[str]: '''simple docstring''' _A = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase ) -> Tuple: '''simple docstring''' _A = dct.pop(__lowercase ) _A = val def __lowercase ( ) -> List[str]: '''simple docstring''' _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Tuple: '''simple docstring''' _A = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=__lowercase , ) _A = ViTHybridConfig(backbone_config=__lowercase , image_size=384 , num_labels=1000 ) _A = False # load original model from timm _A = timm.create_model(__lowercase , pretrained=__lowercase ) timm_model.eval() # load state_dict of original model, remove and rename some keys _A = timm_model.state_dict() if base_model: remove_classification_head_(__lowercase ) _A = create_rename_keys(__lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase , __lowercase ) _A = "huggingface/label-files" _A = "imagenet-1k-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": _A = ViTHybridModel(__lowercase ).eval() else: _A = ViTHybridForImageClassification(__lowercase ).eval() model.load_state_dict(__lowercase ) # create image processor _A = create_transform(**resolve_data_config({} , model=__lowercase ) ) _A = transform.transforms _A = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } _A = ViTHybridImageProcessor( do_resize=__lowercase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowercase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=__lowercase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) _A = prepare_img() _A = transform(__lowercase ).unsqueeze(0 ) _A = processor(__lowercase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(__lowercase , __lowercase ) # verify logits with torch.no_grad(): _A = model(__lowercase ) _A = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: _A = timm_model.forward_features(__lowercase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__lowercase , outputs.pooler_output , 
atol=1e-3 ) else: _A = timm_model(__lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowercase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(__lowercase ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_r50_s16_384''', type=str, help='''Name of the hybrid ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.''' ) lowerCamelCase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
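For clarity, a self-contained sketch of the fused-QKV split performed in read_in_q_k_v above (timm stores query, key, and value as one matrix):

import torch

hidden_size = 4  # toy size
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : 2 * hidden_size, :]
v = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)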
79
0
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowercase_ = CLIPTokenizer lowercase_ = CLIPTokenizerFast lowercase_ = True lowercase_ = {} lowercase_ = False def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]: '''simple docstring''' super().setUp() # fmt: off lowerCamelCase__: str =["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on lowerCamelCase__: Optional[int] =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_)))) lowerCamelCase__: Dict =["#version: 0.2", "l o", "lo w</w>", "e r</w>"] lowerCamelCase__: List[str] ={"unk_token": "<unk>"} lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as fp: fp.write(json.dumps(UpperCAmelCase_) + "\n") with open(self.merges_file , "w" , encoding="utf-8") as fp: fp.write("\n".join(UpperCAmelCase_)) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , **UpperCAmelCase_ : List[str]) ->List[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map) return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Dict: '''simple docstring''' kwargs.update(self.special_tokens_map) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[Any]) ->int: '''simple docstring''' lowerCamelCase__: Optional[Any] ="lower newer" lowerCamelCase__: Optional[Any] ="lower newer" return input_text, output_text def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map) lowerCamelCase__: Any ="lower newer" lowerCamelCase__: List[str] =["lo", "w", "er</w>", "n", "e", "w", "er</w>"] lowerCamelCase__: List[str] =tokenizer.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) lowerCamelCase__: Tuple =tokens + [tokenizer.unk_token] lowerCamelCase__: List[str] =[10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_) @require_ftfy def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): lowerCamelCase__: Tuple =self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: int =self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_) lowerCamelCase__: Union[str, Any] ="A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d." 
lowerCamelCase__: str =tokenizer_s.tokenize(UpperCAmelCase_) lowerCamelCase__: int =tokenizer_r.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways lowerCamelCase__: Optional[int] ="xa\u0303y" + " " + "x\xe3y" lowerCamelCase__: Union[str, Any] =tokenizer_s.tokenize(UpperCAmelCase_) lowerCamelCase__: Tuple =tokenizer_r.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) # Test that the tokenization is identical on unicode of space type lowerCamelCase__: Tuple =[ "\u0009", # (horizontal tab, '\t') "\u000B", # (vertical tab) "\u000C", # (form feed) "\u0020", # (space, ' ') "\u200E", # (left-to-right mark):w "\u200F", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: lowerCamelCase__: int =tokenizer_s.tokenize(UpperCAmelCase_) lowerCamelCase__: Optional[Any] =tokenizer_r.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) # Test that the tokenization is identical on unicode of line break type lowerCamelCase__: Tuple =[ "\u000A", # (line feed, '\n') "\r\n", # (carriage return and line feed, '\r\n') "\u000D", # (carriage return, '\r') "\r", # (carriage return, '\r') "\u000D", # (carriage return, '\r') "\u2028", # (line separator) "\u2029", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: lowerCamelCase__: List[str] =tokenizer_s.tokenize(UpperCAmelCase_) lowerCamelCase__: str =tokenizer_r.tokenize(UpperCAmelCase_) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""): lowerCamelCase__: List[str] ="hello" # `hello` is a token in the vocabulary of `pretrained_name` lowerCamelCase__: str =F"""{text_of_1_token} {text_of_1_token}""" lowerCamelCase__: int =self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , use_fast=UpperCAmelCase_ , ) lowerCamelCase__: str =tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase_))) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase_) + 1, len(UpperCAmelCase_) + 1 + len(UpperCAmelCase_)) , ) lowerCamelCase__: str =F""" {text}""" lowerCamelCase__: Optional[Any] =self.rust_tokenizer_class.from_pretrained( UpperCAmelCase_ , use_fast=UpperCAmelCase_ , ) lowerCamelCase__: Tuple =tokenizer_r(UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase_))) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase_) + 1, 1 + len(UpperCAmelCase_) + 1 + len(UpperCAmelCase_)) , ) def SCREAMING_SNAKE_CASE_ (self : Any) ->Dict: '''simple docstring''' with self.assertRaises(UpperCAmelCase_) as context: self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer") self.assertTrue( context.exception.args[0].startswith( "The `backend_tokenizer` 
provided does not match the expected format.")) @require_ftfy def SCREAMING_SNAKE_CASE_ (self : Any) ->List[Any]: '''simple docstring''' super().test_tokenization_python_rust_equals() def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]: '''simple docstring''' pass
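The setUp above wires a toy BPE vocabulary and merge list into temporary files. As a side note, here is a minimal, self-contained sketch (the helper name toy_bpe is made up for illustration, and it ignores CLIP's lowercasing and regex pre-tokenization) of how that ranked merge list yields the tokenization asserted in the full-tokenizer test:

def toy_bpe(word, merges):
    # Start from characters, with the end-of-word marker glued onto the last one.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    # Lower index = higher merge priority; the "#version: 0.2" header line is skipped.
    ranks = {tuple(m.split()): i for i, m in enumerate(merges)}
    while True:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        mergeable = [p for p in pairs if p in ranks]
        if not mergeable:
            return symbols
        best = min(mergeable, key=lambda p: ranks[p])
        i = pairs.index(best)
        symbols = symbols[:i] + [best[0] + best[1]] + symbols[i + 2 :]

print(toy_bpe("lower", ["l o", "lo w</w>", "e r</w>"]))  # ['lo', 'w', 'er</w>']
print(toy_bpe("newer", ["l o", "lo w</w>", "e r</w>"]))  # ['n', 'e', 'w', 'er</w>']

The "e r</w>" merge fires inside both words, while "lo w</w>" never fires in "lower" because that w is not word-final, which is exactly why the expected tokens are ["lo", "w", "er</w>"].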
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
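The block above is the transformers lazy-import pattern: the real submodules are only imported on first attribute access. The class below is a hedged, simplified stand-in for what a _LazyModule-style object does, not the actual implementation (the real one also handles __dir__, pickling, and error reporting):

import importlib
import types

class MiniLazyModule(types.ModuleType):
    # Simplified stand-in for transformers' _LazyModule; for illustration only.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value

Importing the package therefore stays cheap: the torch-backed modeling file is only loaded when, say, TimeSeriesTransformerModel is first touched, which is why the import_structure entries are gated on is_torch_available().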
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and a second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent. That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCAmelCase__ = float('nan') class lowerCAmelCase__ : '''simple docstring''' def __init__( self , __lowerCamelCase) -> Optional[Any]: _A : List[Any] = sys.stdout _A : str = open(__lowerCamelCase , "a") def __getattr__( self , __lowerCamelCase) -> List[str]: return getattr(self.stdout , __lowerCamelCase) def _lowerCamelCase ( self , __lowerCamelCase) -> str: self.stdout.write(__lowerCamelCase) # strip tqdm codes self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M)) def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ): _A : Tuple = [] # deal with critical env vars _A : Dict = ["CUDA_VISIBLE_DEVICES"] for key in env_keys: _A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ ) if val is not None: cmd.append(f"{key}={val}" ) # python executable (not always needed if the script is executable) _A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes _A : Tuple = [] _A : Dict = "" while len(UpperCamelCase__ ) > 0: current_line += f"{cmd.pop(0 )} " if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) _A : Union[str, Any] = "" return "\\\n".join(UpperCamelCase__ ) def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ): # unwrap multi-line input _A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd ) # remove --output_dir if any and set our own _A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd ) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir _A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , ) _A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ ) if verbose: print("STDOUT" , result.stdout ) print("STDERR" , result.stderr ) # save the streams _A : Tuple = variation.replace(" " , "-" ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("failed" ) return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f: _A : List[str] = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ): _A : Union[str, Any] = [] _A : Optional[int] = [] _A : Any = f"{id}: {variation:<{longest_variation_len}}" _A : Dict = f"{preamble}: " _A : Union[str, Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ): _A : Optional[Any] = process_run_single( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) _A : Optional[Any] = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" _A : str = f"\33[2K\r{outcome}" if len(UpperCamelCase__ ) > 0: _A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} _A : Any = round(mean_metrics[target_metric_key] , 2 ) _A : Tuple = f"{outcome} {mean_target}" if len(UpperCamelCase__ ) > 1: results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}" print(UpperCamelCase__ ) _A : Optional[int] = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def _UpperCAmelCase (): _A : int = torch.cuda.get_device_properties(torch.device("cuda" ) ) return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n" def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ): _A : Any = pd.DataFrame(UpperCamelCase__ ) _A : List[str] = "variation" _A : List[Any] = "diff_%" _A : int = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan _A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the 
sentinel _A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): _A : Optional[Any] = df.apply( lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="columns" , ) # re-order columns _A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] _A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols # capitalize _A : Tuple = df.rename(str.capitalize , axis="columns" ) # make the cols as narrow as possible _A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" ) _A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" ) _A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )] print("\n\n".join(UpperCamelCase__ ) ) def _UpperCAmelCase (): _A : int = argparse.ArgumentParser() parser.add_argument( "--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , ) parser.add_argument( "--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , ) parser.add_argument( "--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , ) parser.add_argument( "--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , ) parser.add_argument( "--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples" , ) parser.add_argument( "--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , ) parser.add_argument( "--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , ) parser.add_argument( "--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , ) _A : int = parser.parse_args() _A : Union[str, Any] = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) _A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ ) # split each dimension into its --foo variations _A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty _A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) ) _A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys _A : str = args.report_metric_keys.split() # capture prints into a log file for convenience _A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt" print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" ) print(f"and this script's output is also piped into {report_fn}" ) _A : Tuple = Tee(UpperCamelCase__ ) print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" ) print(f"Base command: {' '.join(UpperCamelCase__ )}" ) _A : str = "variation" _A : Union[str, Any] = [] for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ): _A : Dict = base_cmd + variation.split() results.append( process_run( id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) ) process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ ) if __name__ == "__main__": main()
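The --variations expansion the header describes is a plain cartesian product. This standalone snippet (mirroring the itertools.product call in main() above) reproduces the six command suffixes from the header's second example:

import itertools

# Each dimension is split on '|'; an empty entry means "use the default".
dims = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]
split_dims = [[v.strip() for v in dim.split("|")] for dim in dims]
variations = [" ".join(combo).strip() for combo in itertools.product(*split_dims)]
print(variations)
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']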
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets lowerCamelCase_ = datasets.logging.get_logger(__name__) lowerCamelCase_ = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' lowerCamelCase_ = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that year\'s competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' lowerCamelCase_ = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores.
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase ( datasets.Metric ): """simple docstring""" def lowerCAmelCase ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "sources": datasets.Value("string" , id="sequence" ), "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[ "https://github.com/Unbabel/COMET", "https://www.aclweb.org/anthology/2020.emnlp-main.213/", "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf", ] , ) def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ): '''simple docstring''' if self.config_name == "default": _A = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) ) else: _A = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def lowerCAmelCase ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : int=False ): '''simple docstring''' if gpus is None: _A = 1 if torch.cuda.is_available() else 0 _A = {"src": sources, "mt": predictions, "ref": references} _A = [dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) for t in zip(*data.values() )] _A , _A = self.scorer.predict(__UpperCAmelCase , gpus=__UpperCAmelCase , progress_bar=__UpperCAmelCase ) return {"mean_score": mean_score, "scores": scores}
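COMET's predict API wants a list of {"src", "mt", "ref"} dicts, while the metric receives three parallel lists; the one-liner in the compute method above converts between the two. A tiny standalone illustration of that zip trick, using the sentences from the docstring example:

data = {
    "src": ["Dem Feuer konnte Einhalt geboten werden"],
    "mt": ["The fire could be stopped"],
    "ref": ["They were able to control the fire."],
}
# Iterating a dict yields its keys; zip(*data.values()) walks the columns in parallel.
rows = [dict(zip(data, row)) for row in zip(*data.values())]
print(rows)
# [{'src': 'Dem Feuer konnte Einhalt geboten werden',
#   'mt': 'The fire could be stopped',
#   'ref': 'They were able to control the fire.'}]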
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class lowerCamelCase__: def __init__( self: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: Any=13 , UpperCamelCase_: Optional[int]=7 , UpperCamelCase_: Any=True , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: List[Any]=99 , UpperCamelCase_: Optional[int]=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: int=4 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Union[str, Any]="gelu" , UpperCamelCase_: List[Any]=0.0 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: str=True , UpperCamelCase_: Optional[Any]=5_12 , UpperCamelCase_: int=16 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: List[Any]=0.02 , UpperCamelCase_: Union[str, Any]=3 , UpperCamelCase_: Optional[Any]=4 , UpperCamelCase_: Tuple=None , ): __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_multiple_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = weight_tying __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def lowerCAmelCase__ ( self: int ): __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = self.get_config() return config, input_ids, input_mask, token_labels def lowerCAmelCase__ ( self: Any ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def lowerCAmelCase__ ( self: Optional[int] ): __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase = True return config, input_ids, input_mask, token_labels def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] ): 
__lowerCamelCase = GPTNeoXJapaneseModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) __lowerCamelCase = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any] ): __lowerCamelCase = True __lowerCamelCase = GPTNeoXJapaneseModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: str , UpperCamelCase_: int ): __lowerCamelCase = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self: int , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ): __lowerCamelCase = True __lowerCamelCase = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() # first forward pass __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ ) __lowerCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) __lowerCamelCase = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ ) __lowerCamelCase = output_from_no_past["""hidden_states"""][0] __lowerCamelCase = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )["""hidden_states"""][0] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = config_and_inputs __lowerCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , unittest.TestCase): UpperCAmelCase__ : List[Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () UpperCAmelCase__ : Dict = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () UpperCAmelCase__ : Dict = ( {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM} if 
is_torch_available() else {} ) UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[Any] = False UpperCAmelCase__ : Tuple = False UpperCAmelCase__ : Optional[int] = False def lowerCAmelCase__ ( self: int ): __lowerCamelCase = GPTNeoXJapaneseModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 ) def lowerCAmelCase__ ( self: Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self: int ): __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: Any ): # This regression test was failing with PyTorch < 1.3 __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() __lowerCamelCase = None self.model_tester.create_and_check_model_as_decoder(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self: str ): __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase_ ) @slow def lowerCAmelCase__ ( self: Dict ): __lowerCamelCase = """abeja/gpt-neox-japanese-2.7b""" __lowerCamelCase = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""] __lowerCamelCase = [ """データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""", """100年後に必要とされる会社は、「人」が中心の会社です。""", """フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""", """国境の長いトンネルを抜けると、そこは雪国だった。""", """美味しい日本食といえば、やっぱりお寿司ですよね。""", ] __lowerCamelCase = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCamelCase_ ) __lowerCamelCase = [] for prompt in prompts: __lowerCamelCase = tokenizer(UpperCamelCase_ , return_tensors="""pt""" ).input_ids __lowerCamelCase = model.generate(UpperCamelCase_ , max_length=50 ) __lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) predicted_outputs += generated_string self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
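The past-key-values test above pins down the defining property of the generation cache: running the full sequence in one pass and running it incrementally with the cache must agree on the new positions. A hedged sketch of the same check against a small public checkpoint (gpt2 is purely illustrative here; any causal LM with a cache should behave the same way):

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2")  # illustrative checkpoint
model.eval()
input_ids = torch.tensor([[464, 3290, 318, 257]])
with torch.no_grad():
    # One pass over the whole sequence...
    full = model(input_ids, output_hidden_states=True).hidden_states[-1]
    # ...versus a cached pass over the prefix plus one incremental step.
    past = model(input_ids[:, :-1], use_cache=True).past_key_values
    step = model(
        input_ids[:, -1:], past_key_values=past, output_hidden_states=True
    ).hidden_states[-1]
print(torch.allclose(full[:, -1:], step, atol=1e-3))  # True, up to float noise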
'''simple docstring''' from __future__ import annotations def __lowercase ( __lowercase , __lowercase = None , __lowercase = None ) -> None: '''simple docstring''' if start is None: _A = 0 if end is None: _A = len(__lowercase ) - 1 if start >= end: return _A = (start + end) // 2 slowsort(__lowercase , __lowercase , __lowercase ) slowsort(__lowercase , mid + 1 , __lowercase ) if sequence[end] < sequence[mid]: _A , _A = sequence[mid], sequence[end] slowsort(__lowercase , __lowercase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
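A quick usage note on the function above: slowsort sorts in place, and it is deliberately pessimal ("multiply and surrender", with worse-than-polynomial running time), so only feed it tiny inputs:

data = [5, 2, 9, 1, 5, 6]
slowsort(data)  # assumes the slowsort defined above is in scope
print(data)     # [1, 2, 5, 5, 6, 9]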
import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class __lowercase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=1024 , lowerCAmelCase__ : int=1024 , lowerCAmelCase__ : Optional[Any]=3.6): SCREAMING_SNAKE_CASE_: List[Any] = tokenizer SCREAMING_SNAKE_CASE_: str = tokenizer.bos_token_id SCREAMING_SNAKE_CASE_: Optional[Any] = dataset SCREAMING_SNAKE_CASE_: Tuple = seq_length SCREAMING_SNAKE_CASE_: str = seq_length * chars_per_token * num_of_sequences def __iter__( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: Dict = iter(self.dataset) SCREAMING_SNAKE_CASE_: Union[str, Any] = True while more_examples: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(lowerCAmelCase__)["content"]) buffer_len += len(buffer[-1]) except StopIteration: SCREAMING_SNAKE_CASE_: str = False break SCREAMING_SNAKE_CASE_: str = tokenizer(lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"] SCREAMING_SNAKE_CASE_: Optional[Any] = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id]) for i in range(0 , len(lowerCAmelCase__) , self.seq_length): SCREAMING_SNAKE_CASE_: Tuple = all_token_ids[i : i + self.seq_length] if len(lowerCAmelCase__) == self.seq_length: yield torch.tensor(lowerCAmelCase__) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = {"streaming": True} SCREAMING_SNAKE_CASE_: Any = load_dataset(args.dataset_name , split="train" , **_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = ConstantLengthDataset(_UpperCAmelCase , _UpperCAmelCase , seq_length=args.seq_length ) SCREAMING_SNAKE_CASE_: Tuple = DataLoader(_UpperCAmelCase , batch_size=args.batch_size ) return eval_dataloader def A_ ( _UpperCAmelCase ): model.eval() SCREAMING_SNAKE_CASE_: Optional[Any] = [] for step, batch in enumerate(_UpperCAmelCase ): with torch.no_grad(): SCREAMING_SNAKE_CASE_: Optional[int] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: int = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(_UpperCAmelCase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break SCREAMING_SNAKE_CASE_: Optional[int] = torch.mean(torch.cat(_UpperCAmelCase ) ) try: SCREAMING_SNAKE_CASE_: Dict = torch.exp(_UpperCAmelCase ) except OverflowError: SCREAMING_SNAKE_CASE_: Any = float("inf" ) return loss.item(), perplexity.item() # Setup Accelerator lowerCAmelCase : Optional[Any] = Accelerator() # Parse configuration lowerCAmelCase : List[str] = HfArgumentParser(EvaluationArguments) lowerCAmelCase : List[str] = parser.parse_args() set_seed(args.seed) # Logging lowerCAmelCase : List[str] = logging.getLogger(__name__) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) # Load model and tokenizer lowerCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt) lowerCAmelCase : Any = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader lowerCAmelCase : Optional[Any] = create_dataloader(args) # Prepare everything with our 
`accelerator`. lowerCAmelCase , lowerCAmelCase : List[str] = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("""Evaluating and saving model after training""") lowerCAmelCase , lowerCAmelCase : List[str] = evaluate(args) logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
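The heart of ConstantLengthDataset above is the packing step: tokenized texts are concatenated with the BOS token as a separator, then sliced into fixed-length windows, dropping the ragged tail. A toy, framework-free sketch of that scheme (pack is a made-up name for illustration):

def pack(token_lists, sep_id, seq_length):
    # Concatenate every example followed by the separator token.
    flat = []
    for toks in token_lists:
        flat.extend(toks + [sep_id])
    # Keep only full windows; the leftover tail is dropped.
    return [
        flat[i : i + seq_length]
        for i in range(0, len(flat), seq_length)
        if len(flat[i : i + seq_length]) == seq_length
    ]

print(pack([[1, 2, 3], [4, 5], [6, 7, 8, 9]], sep_id=0, seq_length=4))
# [[1, 2, 3, 0], [4, 5, 0, 6], [7, 8, 9, 0]]

The character-count buffering (seq_length * chars_per_token * num_of_sequences) just keeps each tokenizer call large enough to amortize its overhead; perplexity is then exp of the mean loss, with the OverflowError guard mapping a diverged loss to infinity.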
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _UpperCAmelCase : """simple docstring""" snake_case = PegasusConfig snake_case = {} snake_case = '''gelu''' def __init__( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any]=13 , __UpperCAmelCase : int=7 , __UpperCAmelCase : List[str]=True , __UpperCAmelCase : str=False , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Tuple=32 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : int=4 , __UpperCAmelCase : Tuple=37 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : List[str]=40 , __UpperCAmelCase : Tuple=2 , __UpperCAmelCase : Optional[int]=1 , __UpperCAmelCase : Any=0 , ): '''simple docstring''' _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_pegasus_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ): '''simple docstring''' _A = TFPegasusModel(config=__UpperCAmelCase ).get_decoder() _A = inputs_dict["input_ids"] _A = input_ids[:1, :] _A = inputs_dict["attention_mask"][:1, :] _A = inputs_dict["head_mask"] _A = 1 # first forward pass _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _A = tf.concat([input_ids, next_tokens] , axis=-1 ) _A = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _A = 
model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _A = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _A = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _A = output_from_no_past[:, -3:, random_slice_idx] _A = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , rtol=1E-3 ) def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case = True snake_case = False snake_case = False def lowerCAmelCase ( self : str ): '''simple docstring''' _A = TFPegasusModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. 
I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] snake_case = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case = '''google/pegasus-xsum''' @cached_property def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.translate_src_text(**__UpperCAmelCase ) assert self.expected_text == generated_words def lowerCAmelCase ( self : Dict , **__UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.tokenizer(self.src_text , **__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="tf" ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__UpperCAmelCase , ) _A = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
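One building block worth isolating from the inputs helper above: the default attention mask is simply "1 wherever the token is not padding". A standalone TensorFlow snippet showing just that rule (pad_token_id = 0 is an assumption for the demo; Pegasus configs define their own):

import tensorflow as tf

pad_token_id = 0  # assumed for this demo
input_ids = tf.constant([[31, 62, 93, 0, 0]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int8)
print(attention_mask.numpy())  # [[1 1 1 0 0]]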
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor _lowerCamelCase : str = logging.get_logger(__name__) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : int , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : List[Any]) ->None: '''simple docstring''' warnings.warn( '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use FlavaImageProcessor instead.''' , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__)
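The file above is the standard rename-with-deprecation shim: the old class name keeps working but warns on construction. A generic, self-contained sketch of the pattern (all names here are made up):

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    # Made-up names; this mirrors the shim pattern, not the real Flava classes.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits a FutureWarning but still returns a working instance

Subclassing, rather than aliasing, keeps isinstance checks and existing references to the old name working while steering users toward the new class.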
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=99 , __UpperCAmelCase : List[str]=32 , __UpperCAmelCase : Union[str, Any]=2 , __UpperCAmelCase : List[str]=4 , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : Dict=512 , __UpperCAmelCase : List[Any]=16 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=None , ): '''simple docstring''' _A = parent _A = 13 _A = 7 _A = True _A = True _A = True _A = True _A = 99 _A = 32 _A = 2 _A = 4 _A = 37 _A = "gelu" _A = 0.1 _A = 0.1 _A = 512 _A = 16 _A = 2 _A = 0.02 _A = 3 _A = 4 _A = None def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = TFRoFormerModel(config=__UpperCAmelCase ) _A = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} _A = [input_ids, input_mask] _A = model(__UpperCAmelCase ) _A = model(__UpperCAmelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = True _A = TFRoFormerForCausalLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ): '''simple docstring''' _A = TFRoFormerForMaskedLM(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForSequenceClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ): '''simple docstring''' _A = self.num_choices _A = TFRoFormerForMultipleChoice(config=__UpperCAmelCase ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _A = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' _A = self.num_labels _A = TFRoFormerForTokenClassification(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : int ): '''simple 
docstring''' _A = TFRoFormerForQuestionAnswering(config=__UpperCAmelCase ) _A = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } _A = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" snake_case = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case = ( { '''feature-extraction''': TFRoFormerModel, '''fill-mask''': TFRoFormerForMaskedLM, '''question-answering''': TFRoFormerForQuestionAnswering, '''text-classification''': TFRoFormerForSequenceClassification, '''text-generation''': TFRoFormerForCausalLM, '''token-classification''': TFRoFormerForTokenClassification, '''zero-shot''': TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case = False snake_case = False def lowerCAmelCase ( self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerModelTester(self ) _A = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__UpperCAmelCase ) def lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def lowerCAmelCase ( self : List[Any] ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def lowerCAmelCase ( self : str ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def lowerCAmelCase ( self : Any ): '''simple docstring''' _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def lowerCAmelCase ( self : Dict ): '''simple docstring''' _A = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) 
self.assertIsNotNone(__UpperCAmelCase ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' _A = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) _A = tf.constant([[0, 1, 2, 3, 4, 5]] ) _A = model(__UpperCAmelCase )[0] # TODO Replace vocab size _A = 50000 _A = [1, 6, vocab_size] self.assertEqual(output.shape , __UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. _A = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : List[str] ): '''simple docstring''' _A = tf.constant([[4, 10]] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) _A = emba(input_ids.shape ) _A = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' _A = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) _A = emba.weight[:3, :5] tf.debugging.assert_near(__UpperCAmelCase , __UpperCAmelCase , atol=self.tolerance ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" snake_case = 1E-4 def lowerCAmelCase ( self : str ): '''simple docstring''' _A = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 _A = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) _A = embed_positions([2, 16, 768] )[None, None, :, :] _A , _A = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _A = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) _A = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __UpperCAmelCase , atol=self.tolerance )
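The final test above exercises RoFormer's rotary position embeddings. As a hedged refresher, here is the generic rotary formula in NumPy (not the exact sin/cos memory layout used by TFRoFormerSelfAttention, which splits and restacks the halves differently): each pair of feature dimensions is rotated by a position-dependent angle, so query-key dot products end up depending only on relative position.

import numpy as np

def rotary(x, positions, theta_base=10000.0):
    # Generic RoPE sketch, not the transformers implementation.
    # x: (seq, dim) with dim even; rotates each (even, odd) feature pair.
    dim = x.shape[-1]
    inv_freq = theta_base ** (-np.arange(0, dim, 2) / dim)   # (dim/2,)
    angles = np.outer(positions, inv_freq)                   # (seq, dim/2)
    sin, cos = np.sin(angles), np.cos(angles)
    out = np.empty_like(x)
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    out[..., 0::2] = x_even * cos - x_odd * sin
    out[..., 1::2] = x_even * sin + x_odd * cos
    return out

q = np.random.randn(6, 8)
print(rotary(q, np.arange(6)).shape)  # (6, 8)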
79
0
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed.\nThe suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)


def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
15
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
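For reference, the `rope_scaling` contract enforced above can be exercised on its own. This is a minimal sketch that mirrors `_rope_scaling_validation` as a free function; the function name is hypothetical, not part of the file above.

def validate_rope_scaling(rope_scaling):
    # None means rotary-embedding scaling is disabled, which is always valid.
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}")
    rope_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if rope_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # ok
validate_rope_scaling(None)                                # ok: scaling disabled
# validate_rope_scaling({"type": "linear", "factor": 1})   # would raise: factor must be a float > 1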
79
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { 'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json', } class __A ( A_ ): '''simple docstring''' lowerCAmelCase : Optional[int] = "roc_bert" def __init__( self : List[str] ,_snake_case : Any=30_522 ,_snake_case : Union[str, Any]=768 ,_snake_case : Union[str, Any]=12 ,_snake_case : List[Any]=12 ,_snake_case : Union[str, Any]=3_072 ,_snake_case : Optional[int]="gelu" ,_snake_case : int=0.1 ,_snake_case : Any=0.1 ,_snake_case : int=512 ,_snake_case : Optional[int]=2 ,_snake_case : List[str]=0.02 ,_snake_case : Dict=1e-12 ,_snake_case : str=True ,_snake_case : Tuple=0 ,_snake_case : List[str]="absolute" ,_snake_case : Optional[Any]=None ,_snake_case : Union[str, Any]=True ,_snake_case : Optional[Any]=True ,_snake_case : List[Any]=768 ,_snake_case : Dict=910 ,_snake_case : List[str]=512 ,_snake_case : List[str]=24_858 ,_snake_case : Tuple=True ,**_snake_case : str ,) -> int: """simple docstring""" lowercase__ : Union[str, Any] = vocab_size lowercase__ : int = max_position_embeddings lowercase__ : Optional[Any] = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : Tuple = intermediate_size lowercase__ : Optional[Any] = hidden_act lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Optional[int] = initializer_range lowercase__ : int = type_vocab_size lowercase__ : int = layer_norm_eps lowercase__ : List[Any] = use_cache lowercase__ : List[str] = enable_pronunciation lowercase__ : Tuple = enable_shape lowercase__ : Optional[Any] = pronunciation_embed_dim lowercase__ : Tuple = pronunciation_vocab_size lowercase__ : Optional[Any] = shape_embed_dim lowercase__ : List[Any] = shape_vocab_size lowercase__ : int = concat_input lowercase__ : str = position_embedding_type lowercase__ : Dict = classifier_dropout super().__init__(pad_token_id=_snake_case ,**_snake_case )
16
'''simple docstring'''
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
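A quick worked check of the contrast factor used above, with plain arithmetic and no PIL needed: level=0 gives factor 1.0 (identity), level=-255 gives 0.0 (every pixel collapses to gray 128), and the factor grows sharply as level approaches 259.

for level in (-255, 0, 128, 255):
    factor = (259 * (level + 255)) / (255 * (259 - level))
    print(level, round(factor, 3))
# -255 0.0
# 0 1.0
# 128 2.97
# 255 129.5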
79
0
"""simple docstring""" def _A ( UpperCamelCase_ : list[list[int]], UpperCamelCase_ : int, UpperCamelCase_ : int, UpperCamelCase_ : list[int]) -> bool: '''simple docstring''' if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path) def _A ( UpperCamelCase_ : list[list[int]], UpperCamelCase_ : list[int], UpperCamelCase_ : int) -> bool: '''simple docstring''' if curr_ind == len(UpperCamelCase_): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0, len(UpperCamelCase_)): if valid_connection(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_): # Insert current vertex into path as next transition __lowercase = next_ver # Validate created path if util_hamilton_cycle(UpperCamelCase_, UpperCamelCase_, curr_ind + 1): return True # Backtrack __lowercase = -1 return False def _A ( UpperCamelCase_ : list[list[int]], UpperCamelCase_ : int = 0) -> list[int]: '''simple docstring''' __lowercase = [-1] * (len(UpperCamelCase_) + 1) # initialize start and end of path with starting index __lowercase = __lowercase = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(UpperCamelCase_, UpperCamelCase_, 1) else []
17
'''simple docstring'''


def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
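The recurrence implemented above is s(n) = s(n-1)^2 - s(n-1) + 1, since lower * upper + 1 = (num - 1) * num + 1. A quick check of the first terms:

for n in range(1, 6):
    print(n, sylvester(n))
# 1 2
# 2 3
# 3 7
# 4 43
# 5 1807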
79
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
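The init file above registers its submodules lazily: nothing heavy is imported until an attribute is first accessed. A simplified sketch of that idea follows; it is not the actual _LazyModule implementation, just an illustration of the mechanism under the same assumptions (a package with an import-structure mapping).

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes by importing the owning submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for module_name, exports in self._import_structure.items():
            if attr in exports:
                module = importlib.import_module(f".{module_name}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")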
18
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor, one layer per line."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)

    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()

        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens

    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)

    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [v]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
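The core trick in compute_heads_importance above is reading |d loss / d mask| off a requires-grad head mask. A toy illustration of that gradient-based importance idea, with illustrative names and no transformer model:

import torch

mask = torch.ones(2, 3, requires_grad=True)  # 2 "layers" x 3 "heads"
contributions = torch.randn(2, 3)            # stand-in for per-head outputs
loss = (contributions * mask).sum() ** 2
loss.backward()
importance = mask.grad.abs()                 # larger gradient magnitude = more important head
print(importance)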
79
0
import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
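A hypothetical usage sketch for the composite tokenizer above; it assumes a standard transformers install and network access to the public facebook/rag-token-nq checkpoint, and encodes with the question-encoder tokenizer (the default dispatch target of __call__).

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
batch = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
print(batch["input_ids"].shape)  # DPR question-encoder token ids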
19
'''simple docstring'''
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
79
0