| code (stringlengths 81-54k) | code_codestyle (int64 0-721) | style_context (stringlengths 91-41.9k) | style_context_codestyle (int64 0-699) | label (int64 0-1) |
|---|---|---|---|---|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}." )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Set to -inf the lowest probability logits whose cumulative probability exceeds `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
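A minimal usage sketch of the pipeline above. The checkpoint id is an assumption based on the public VQ-Diffusion release; substitute your own if it differs.

# Hedged example: assumes the "microsoft/vq-diffusion-ithq" checkpoint bundles all the
# modules registered above; not guaranteed by this file.
import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe("teddy bear playing in the pool", num_inference_steps=100).images[0]
image.save("teddy.png")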
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline: returns the hidden states of the base transformer as features."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)" )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (logits or last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
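A short, hedged usage sketch via the `pipeline` factory; the model id is illustrative, not mandated by this file.

# Hedged example: assumes a standard transformers install; any encoder model id works.
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers is great!")   # nested list: [batch, tokens, hidden_size]
print(len(features[0]), len(features[0][0]))     # token count, hidden size (768 for DistilBERT)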
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix, suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting, mirroring the source-side logic above."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
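A hedged usage sketch; it assumes the facebook/nllb-200-distilled-600M checkpoint referenced in the maps above.

# Hedged example: in default (non-legacy) mode input_ids start with the source
# language code and end with </s>.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tokenizer("Hello, world!", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(batch["input_ids"][0].tolist())[0])  # 'eng_Latn'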
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
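A hedged way to drive `main()` programmatically; the task name and paths below are illustrative ("swag" is one of the tasks the original `utils_multiple_choice.processors` registers, but verify against your copy).

# Hypothetical invocation; adjust the task, data, and output paths to your setup.
import sys

sys.argv = [
    "run_multiple_choice.py",
    "--task_name", "swag",
    "--model_name_or_path", "bert-base-uncased",
    "--data_dir", "./data/swag",
    "--output_dir", "./swag_output",
    "--do_train",
    "--do_eval",
]
main()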
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    """Configuration class for an EfficientFormer model; defaults mirror efficientformer-l1."""

    model_type = "efficientformer"

    def __init__( self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
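A hedged usage sketch; `EfficientFormerModel` is assumed to be exported next to this config in transformers.

# Hedged example: builds a randomly initialized model from the config above (requires torch).
from transformers import EfficientFormerConfig, EfficientFormerModel

config = EfficientFormerConfig()       # defaults mirror efficientformer-l1
model = EfficientFormerModel(config)   # randomly initialized, not pretrained
print(config.depths)                   # [3, 2, 6, 4]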
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
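The `_LazyModule` indirection defers the heavy torch-dependent imports until first attribute access. A hedged sketch of the observable behavior, assuming a standard transformers install:

# Hedged example: nothing from modeling_mra is imported until a modeling symbol is touched.
import transformers.models.mra as mra

config = mra.MraConfig()    # cheap: only configuration_mra has been loaded so far
model_cls = mra.MraModel    # first access triggers the modeling_mra import (requires torch)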
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
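A worked example: with 8-directional connectivity the grid below contains exactly two islands.

grid = [
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 1],
]
g = Graph(3, 4, grid)
assert g.count_islands() == 2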
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
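A hedged check of the builder against a scratch tree; the directory and file names are illustrative.

# Hedged example: builds a throwaway tree, runs the generator, then restores the cwd.
import os
import pathlib
import tempfile

cwd = os.getcwd()
with tempfile.TemporaryDirectory() as tmp:
    pathlib.Path(tmp, "sorts").mkdir()
    pathlib.Path(tmp, "sorts", "bubble_sort.py").write_text("pass\n")
    os.chdir(tmp)
    print_directory_md(".")
    # prints:
    # ## Sorts
    #   * [Bubble Sort](sorts/bubble_sort.py)
    os.chdir(cwd)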
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: 0-weight edges go to the front of the deque, 1-weight edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
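A worked example: both routes from vertex 0 to vertex 2 below cost 1, so the shortest path is 1.

g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 3, 1)
g.add_edge(3, 2, 0)
assert g.get_shortest_path(0, 2) == 1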
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." )


class FlaxLogitsProcessorList(list):
    """A list of processors/warpers that applies each of them in turn to a `scores` tensor."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor." )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Warper for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Warper that performs nucleus (top-p) filtering on the scores."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Warper that keeps only the top-k highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces the specified token as the last generated token when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses EOS until a minimum length has been generated."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses a list of tokens at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Forces specific tokens at specific generation indices, given a {index: token} map."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores, ), )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Constrains Whisper generation so timestamp tokens appear in valid pairs and positions."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False, )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp, )

            return jnp.where(
                last_was_timestamp, jnp.where(
                    penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float("inf")), scores_k.at[: self.eos_token_id].set(-float("inf")), ), scores_k, )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False, )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float("inf")), scores, )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float("inf")), scores_k, )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
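A minimal sketch of chaining the warpers above through the processor list; shapes and values are illustrative.

# Hedged example: temperature then top-k, applied in order by FlaxLogitsProcessorList.
import jax.numpy as jnp

processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
)
input_ids = jnp.ones((2, 5), dtype=jnp.int32)
scores = jnp.zeros((2, 1000))
scores = processors(input_ids, scores, cur_len=5)  # all but 50 entries per row are now -inf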
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
import argparse
import struct
import unittest
class SHA256:
    """Class to contain the entire pipeline for SHA-256 hashing."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
        # Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by the given number of bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file" )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
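A quick sanity check of the pure-Python digest against the standard library.

import hashlib

payload = b"hello world"
assert SHA256(payload).hash == hashlib.sha256(payload).hexdigest()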
def count_inversions_bf(arr):
    """Brute-force O(n^2) inversion count."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Divide-and-conquer O(n log n) inversion count; returns (sorted array, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
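A small worked example: [3, 1, 2] has exactly two inversions, (3, 1) and (3, 2), and both implementations agree.

assert count_inversions_bf([3, 1, 2]) == 2
assert count_inversions_recursive([3, 1, 2]) == ([1, 2, 3], 2)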
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of a float, or its derivative when deriv=True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
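A hedged non-interactive example: with enough propagations the output converges near the expected value.

result = forward_propagation(32, 450_000)
print(result)  # typically close to 32; the exact value varies with the random starting weight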
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]
        text_inputs = pipe.tokenizer(
            prompt,
            padding='max_length',
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors='np',
        )
        text_inputs = text_inputs['input_ids']
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
        inputs['prompt_embeds'] = prompt_embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
    def test_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('prompt')]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding='max_length',
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors='np',
            )
            text_inputs = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
        inputs['prompt_embeds'], inputs['negative_prompt_embeds'] = embeds
        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            'CUDAExecutionProvider',
            {
                'gpu_mem_limit': '15000000000',  # 15GB
                'arena_extend_strategy': 'kSameAsRequested',
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'open neural network exchange'
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'Andromeda galaxy in a bottle'
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5',
            revision='onnx',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe('example prompt', num_inference_steps=2).images[0]
        assert image is not None
| 709
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MTaIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        labels = tokenizer('Hi I am', return_tensors='np').input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
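        # Note on the score above (added for clarity): `loss` is the mean per-token
        # cross-entropy, so multiplying by the target length and negating yields the
        # summed log-likelihood of the target. With hypothetical numbers: a mean
        # loss of 21.2282 over a 4-token target gives -(4 * 21.2282) = -84.9127.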
| 52
| 0
|
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-indexed alphabet position."""
    return [ord(elem) - 96 for elem in plain]

def decode(encoded: list[int]) -> str:
    return ''.join(chr(elem + 96) for elem in encoded)

def main() -> None:
    encoded = encode(input('-> ').strip().lower())
    print('Encoded: ', encoded)
    print('Decoded:', decode(encoded))
if __name__ == "__main__":
main()
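    # Example session (added illustration): encode('hello') returns
    # [8, 5, 12, 12, 15] since ord('h') - 96 == 8, and
    # decode([8, 5, 12, 12, 15]) returns 'hello'.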
| 710
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = 'roberta'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
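# Minimal usage sketch (added; the class names follow the repairs applied above,
# not a separately exported API):
#
#   config = RobertaConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2)
#   onnx_config = RobertaOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes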
| 52
| 0
|
def actual_power(a: int, b: int):
    """Recursive exponentiation by halving the exponent."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))

def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
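    # Worked examples (added): power(2, 10) == 1024, and power(-2, -3) evaluates
    # to 1 / actual_power(-2, -3) == 1 / -8 == -0.125, since int(b / 2) truncates
    # toward zero for negative exponents.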
| 711
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 52
| 0
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value

_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case):
    return unittest.skip('Test was skipped')(test_case)

def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, 'test is slow')(test_case)

def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), 'test requires only a CPU')(test_case)

def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), 'test requires a GPU')(test_case)

def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), 'test requires a XPU')(test_case)

def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), 'test requires a `mps` backend support in `torch`')(test_case)

def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), 'test requires the Hugging Face suite')(test_case)

def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), 'test requires the bitsandbytes library')(test_case)

def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), 'test requires TPU')(test_case)

def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, 'test requires a GPU')(test_case)

def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, 'test requires a XPU')(test_case)

def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, 'test requires multiple GPUs')(test_case)

def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, 'test requires multiple XPUs')(test_case)

def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), 'test requires safetensors')(test_case)

def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), 'test requires DeepSpeed')(test_case)

def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=', '1.12.0'), 'test requires torch version >= 1.12.0')(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version('>=', version), f'''test requires torch version >= {version}''')(test_case)
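# Usage sketch (added; assumes the decorator keeps the name restored above --
# with no version it can also be applied bare through the `partial` branch):
#
# @require_torch_min_version(version='1.12.0')
# def test_needs_recent_torch(self):
#     ...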
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), 'test requires Tensorboard')(test_case)

def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), 'test requires wandb')(test_case)

def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), 'test requires comet_ml')(test_case)

_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)

def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        'test requires at least one tracker to be available and for `comet_ml` to not be installed',
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)

class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()

class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks=None):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr

async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'''Command `{" ".join(command)}` failed with the following error:\n\n{e.output.decode()}''') from e
| 712
|
import pytest
import datasets
# Import fixture modules as plugins
SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)

def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')

@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))

@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    datasets.disable_progress_bar()

@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)

@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
| 52
| 0
|
from collections.abc import Sequence
from queue import Queue
class __lowerCAmelCase :
"""simple docstring"""
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
def __repr__( self : List[str] ):
"""simple docstring"""
return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class __lowerCAmelCase :
"""simple docstring"""
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)
    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)
    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)
    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)
    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 5_0)
SCREAMING_SNAKE_CASE__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
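# Complexity note (added): _build_tree is O(n), while update and query_range
# follow root-to-leaf paths, so both run in O(log n) per call. E.g. with
# fn=operator.add above, query_range(1, 3) after update(1, 5) is 5 + 5 + 3 = 13,
# matching the printed expectation.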
| 713
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    result_str = f'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str)
    with open(f'''{dataset_id}_eval_results.txt''', 'w') as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'''log_{dataset_id}_predictions.txt'''
        target_file = f'''log_{dataset_id}_targets.txt'''
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'''{i}''' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'''{i}''' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text
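# Example (added): normalize_text('Hello, World!') -> 'hello world'; the comma
# and exclamation mark fall in the ignored-character class, and the whitespace
# pass collapses newlines and repeated spaces into single spaces.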
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
| 52
| 0
|
from __future__ import annotations
def is_palindrome(n) -> bool:
    n = str(n)
    return n == n[::-1]

def solution(limit: int = 1_000_000) -> int:
    """Sum the numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
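    # Example (added): 585 is counted, since str(585) == '585' is a decimal
    # palindrome and bin(585) == '0b1001001001' is a binary palindrome.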
| 714
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def convert_state_dict(orig_state_dict, model) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
A__ = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
A__ = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
A__ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
A__ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
A__ = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
A__ = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
A__ = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
A__ = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
A__ = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
A__ = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
A__ = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
A__ = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' )
model.push_to_hub(__UpperCamelCase , organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 52
| 0
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token='(BOS)',
            eos_token='(EOS)',
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding='max_length',
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 715
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def _a ( self : List[str] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[int] ):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _a ( self : List[Any] , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = DistilBertModel(config=_A )
model.to(_A )
model.eval()
A__ = model(_A , _A )
A__ = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[Any] , _snake_case : Optional[int] , _snake_case : int , _snake_case : Any , _snake_case : str , _snake_case : Tuple , _snake_case : Any ):
"""simple docstring"""
A__ = DistilBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
A__ = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Optional[int] , _snake_case : str , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = DistilBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
A__ = model(
_A , attention_mask=_A , start_positions=_A , end_positions=_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : int , _snake_case : List[str] , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Tuple ):
"""simple docstring"""
A__ = self.num_labels
A__ = DistilBertForSequenceClassification(_A )
model.to(_A )
model.eval()
A__ = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : List[Any] , _snake_case : int , _snake_case : str , _snake_case : Any , _snake_case : int , _snake_case : Any , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = self.num_labels
A__ = DistilBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
A__ = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Optional[Any] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = self.num_choices
A__ = DistilBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
A__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = model(
_A , attention_mask=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( a__ , a__ , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
A__ : List[str] = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : int = True
A__ : str = True
A__ : Optional[int] = True
A__ : Union[str, Any] = True
def _a ( self : Tuple ):
"""simple docstring"""
A__ = DistilBertModelTester(self )
A__ = ConfigTester(self , config_class=_A , dim=37 )
def _a ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_A )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_A )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_A )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_A )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A )
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = DistilBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@slow
@require_torch_gpu
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
A__ = True
A__ = model_class(config=_A )
A__ = self._prepare_for_class(_A , _A )
A__ = torch.jit.trace(
_A , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_A , os.path.join(_A , 'traced_model.pt' ) )
A__ = torch.jit.load(os.path.join(_A , 'traced_model.pt' ) , map_location=_A )
loaded(inputs_dict['input_ids'].to(_A ) , inputs_dict['attention_mask'].to(_A ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = DistilBertModel.from_pretrained('distilbert-base-uncased' )
A__ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
A__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ = model(_A , attention_mask=_A )[0]
A__ = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _A )
A__ = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) )
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
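
    # For illustration (hypothetical ids): with cls_token_id=101 and sep_token_id=102,
    # sequences A=[7, 8] and B=[9] are combined by build_inputs_with_special_tokens as
    #     [101, 7, 8, 102, 9, 102]
    # and create_token_type_ids_from_sequences marks the two segments as
    #     [0,   0, 0, 0,   1, 1]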
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
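
# Worked example: with first_term=1, common_diff=1, num_of_terms=10 the series is
# 1 + 2 + ... + 10, so (10 / 2) * (2 * 1 + (10 - 1) * 1) = 5 * 11 = 55.0.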
def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
SCREAMING_SNAKE_CASE__ = "\\n\n"
SCREAMING_SNAKE_CASE__ = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
SCREAMING_SNAKE_CASE__ = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def _a ( self : int ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
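
# A minimal usage sketch (the checkpoint name here is an assumption, not taken
# from this file):
#
#     from PIL import Image
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     encoding = processor(images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt")
#     # `encoding` merges the tokenizer fields (input_ids, attention_mask) with
#     # the image processor fields (pixel_values, pixel_mask).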
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
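
    # A minimal instantiation sketch using the defaults above:
    #
    #     configuration = MarkupLMConfig()
    #     assert configuration.max_depth == 50
    #     assert configuration.xpath_unit_hidden_size == 32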
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = self.get_config()
return config, pixel_values
def _a ( self : Tuple ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _a ( self : Any , _snake_case : Any , _snake_case : Tuple ):
"""simple docstring"""
A__ = FlaxRegNetModel(config=snake_case_ )
A__ = model(snake_case_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : int , _snake_case : List[str] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = self.num_labels
A__ = FlaxRegNetForImageClassification(config=snake_case_ )
A__ = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Optional[int] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class __lowerCAmelCase ( _a , unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
A__ : List[Any] = False
A__ : Optional[Any] = False
A__ : int = False
def _a ( self : List[str] ):
"""simple docstring"""
A__ = FlaxRegNetModelTester(self )
A__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def _a ( self : List[Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : int ):
"""simple docstring"""
return
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def _a ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def _a ( self : Any ):
"""simple docstring"""
pass
def _a ( self : Tuple ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(snake_case_ )
A__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case_ )
def _a ( self : Any ):
"""simple docstring"""
def check_hidden_states_output(_snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any] ):
A__ = model_class(snake_case_ )
A__ = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A__ = self._prepare_for_class(snake_case_ , snake_case_ )
A__ = model_class(snake_case_ )
@jax.jit
def model_jitted(_snake_case : str , **_snake_case : str ):
return model(pixel_values=snake_case_ , **snake_case_ )
with self.subTest('JIT Enabled' ):
A__ = model_jitted(**snake_case_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
A__ = model_jitted(**snake_case_ ).to_tuple()
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
for jitted_output, output in zip(snake_case_ , snake_case_ ):
self.assertEqual(jitted_output.shape , output.shape )
def A ( ) -> List[Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : int ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=snake_case_ , return_tensors='np' )
A__ = model(**snake_case_ )
# verify the logits
A__ = (1, 10_00)
self.assertEqual(outputs.logits.shape , snake_case_ )
A__ = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
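
# e.g. infer_model_type("facebook/rag-token-nq") returns "rag_token", while a
# plain BART checkpoint whose name contains "bart" maps to "bart".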
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
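
# Worked example for precision@k (illustrative titles only): with k=2, a
# hypothesis provenance of {"Paris", "Lyon"} and a gold provenance of {"Paris"}
# share one title, so that example contributes 1 / 2 = 0.5 to `em`,
# i.e. 50% precision@2.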
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
def strip_title(__UpperCamelCase ):
if title.startswith('"' ):
A__ = title[1:]
if title.endswith('"' ):
A__ = title[:-1]
return title
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device )
A__ = rag_model.rag.question_encoder(__UpperCamelCase )
A__ = question_enc_outputs[0]
A__ = rag_model.retriever(
__UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
A__ = []
for docs in all_docs:
A__ = [strip_title(__UpperCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(__UpperCamelCase ) )
return provenance_strings
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
with torch.no_grad():
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase )
A__ = inputs_dict.input_ids.to(args.device )
A__ = inputs_dict.attention_mask.to(args.device )
A__ = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) )
return answers
def A ( ) -> Any:
A__ = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
A__ = parser.parse_args()
A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def A ( __UpperCamelCase ) -> int:
A__ = {}
if args.model_type is None:
A__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
A__ = args.n_docs
if args.index_name is not None:
A__ = args.index_name
if args.index_path is not None:
A__ = args.index_path
else:
A__ = BartForConditionalGeneration
A__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase )
A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
A__ = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
A__ = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
A__ = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase )
model.retriever.init_retrieval()
else:
A__ = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
A__ = []
for line in tqdm(__UpperCamelCase ):
questions.append(line.strip() )
if len(__UpperCamelCase ) == args.eval_batch_size:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) + '\n' )
preds_file.flush()
A__ = []
if len(__UpperCamelCase ) > 0:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) )
preds_file.flush()
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_args()
main(args)
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
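
# Example: dataclasses reject mutable defaults such as field(default=[8]), so the
# helper above wraps the value in a default_factory; list_field(default=[8], ...)
# then produces a fresh [8] for every new instance.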
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : Union[str, Any] = list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
A__ : str = list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
A__ : List[str] = list_field(
default=[8, 32, 1_28, 5_12] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
A__ : List[Any] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
A__ : List[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
A__ : Any = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
A__ : int = field(default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Use FP16 to accelerate inference."} )
A__ : Union[str, Any] = field(default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Benchmark training of model"} )
A__ : List[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Verbose memory tracing"} )
A__ : Tuple = field(
default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
A__ : Tuple = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
A__ : Optional[Any] = field(default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Trace memory line by line"} )
A__ : Optional[Any] = field(default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Save result to a CSV file"} )
A__ : int = field(default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Save all print statements in a log file"} )
A__ : List[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={"help": "Whether to print environment information"} )
A__ : str = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
A__ : Tuple = field(
default=f"""inference_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv."} , )
A__ : List[str] = field(
default=f"""inference_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
A__ : List[str] = field(
default=f"""train_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
A__ : List[str] = field(
default=f"""train_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
A__ : int = field(
default=f"""env_info_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving environment information."} , )
A__ : List[Any] = field(
default=f"""log_{round(time() )}.csv""" , metadata={"help": "Log filename used if print statements are saved in log."} , )
A__ : str = field(default=3 , metadata={"help": "Times an experiment will be run."} )
A__ : Tuple = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def _a ( self : Dict ):
"""simple docstring"""
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )
def _a ( self : Optional[int] ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def _a ( self : str ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
return self.models
@property
def _a ( self : List[str] ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.' )
return False
else:
return True
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
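        # Worked example with the defaults above: image_size=64 and an output
        # stride of 32 give a 64 // 32 = 2 pixel feature map per side, hence
        # (64 // 32) ** 2 = 4 patches and seq_length = 4 + 1 = 5 with the [CLS] token.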
def _a ( self : Any ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Tuple ):
"""simple docstring"""
A__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _a ( self : List[str] , config : str , pixel_values : Union[str, Any] , labels : Any ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Dict ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Any = False
A__ : Union[str, Any] = False
    def _a ( self : Dict ):
        """simple docstring"""
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : int ):
"""simple docstring"""
pass
    def _a ( self : int ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def _a ( self : List[str] ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def _a ( self : Any ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _a ( self : str ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _a ( self : Any ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone parameters
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    @slow
    def _a ( self : int ):
        """simple docstring"""
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : List[Any] ):
"""simple docstring"""
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
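# To exercise the @slow integration tests above, the transformers convention is to
# opt in via an environment variable (test path shown for the usual repo layout):
#   RUN_SLOW=1 python -m pytest tests/models/vit_hybrid/test_modeling_vit_hybrid.py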
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt( args ) -> None:
    parameter_file = os.path.join(args.tf_model_dir , 'parameters.json' )
    params = json.loads(open(parameter_file ).read() )
    if not params:
        raise ValueError(
            f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
    if not args.output.endswith('.pt' ):
        args.output = args.output + '.pt'
    new_state = OrderedDict()
    with tf.device('/CPU:0' ):
        reader = tf.train.load_checkpoint(args.tf_model_dir )
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name ).astype(np.float32 )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
A__ = int(key_name[9] )
elif key_name.startswith('pasts/out' ):
A__ = 8
A__ = 'model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(snake_case__ )
elif key_name.startswith('model/moe' ):
A__ = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
A__ = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(snake_case__ )
elif key_name.endswith('/softmlp/kernel' ):
A__ = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(snake_case__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
A__ = key_name[-9:-7]
for i in range(16 ):
A__ = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
A__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
A__ = torch.tensor(snake_case__ )
elif key_name.startswith('model/mlp' ):
A__ = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
A__ = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(snake_case__ )
elif key_name.endswith('/p1/bias' ):
A__ = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(snake_case__ )
elif key_name.endswith('/p2/kernel' ):
A__ = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(snake_case__ )
elif key_name.endswith('/p2/bias' ):
A__ = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(snake_case__ )
elif key_name.startswith('model/ln' ):
A__ = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
A__ = 'model.blocks.%d.feed_forward.norm.bias' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(snake_case__ )
elif key_name.endswith('/g' ):
A__ = 'model.blocks.%d.feed_forward.norm.weight' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(snake_case__ )
elif key_name.startswith('model/att' ):
A__ = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
A__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
A__ = state[:, 0, :, :]
A__ = state[:, 1, :, :]
A__ = state[:, 2, :, :]
A__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
A__ = torch.tensor(snake_case__ )
A__ = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
A__ = torch.tensor(snake_case__ )
A__ = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
A__ = torch.tensor(snake_case__ )
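                    # Shape sketch for the fused qkv kernel above (sizes illustrative): a TF
                    # array of shape (d_model, 3, n_heads, head_dim) is split into q/k/v,
                    # each flattened to (d_model, n_heads*head_dim) and transposed so the
                    # torch projections get the (out_features, in_features) layout.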
elif key_name.endswith('/o/kernel' ):
A__ = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
A__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(snake_case__ )
elif key_name.startswith('model/an' ):
A__ = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
A__ = 'model.blocks.%d.self_attn.norm.bias' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(snake_case__ )
elif key_name.endswith('/g' ):
A__ = 'model.blocks.%d.self_attn.norm.weight' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(snake_case__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
A__ = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
A__ = 'model.%s.weight' % nlayer
A__ = vnp.copy() # same in embedded
A__ = torch.tensor(snake_case__ )
if key_name.startswith('model/wte' ):
A__ = 'lm_head.weight'
A__ = vnp.copy() # same in embedded
A__ = torch.tensor(snake_case__ )
elif key_name.startswith('model/wob' ):
A__ = 'final_logits_bias'
A__ = vnp.copy() # same in embedded
A__ = state.reshape((1, -1) )
A__ = torch.tensor(snake_case__ )
elif key_name == "model/dense/kernel":
A__ = 'model.last_project.weight'
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(snake_case__ )
elif key_name == "model/dense_1/bias":
A__ = 'model.last_project.bias'
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(snake_case__ )
    torch.save(new_state , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
    parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
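# Invocation sketch (script name and paths are illustrative; the checkpoint
# directory must hold `parameters.json` plus the TensorFlow checkpoint files):
#   python convert_tf_gptsan_to_pt.py --tf_model_dir /path/to/ckpt --output gptsan.pt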
def is_even( number: int ) -> bool:
    """
    >>> is_even(2)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0
if __name__ == "__main__":
    import doctest
    doctest.testmod()
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig( PretrainedConfig ):
"""simple docstring"""
A__ : Union[str, Any] = 'vision-encoder-decoder'
A__ : List[str] = True
    def __init__( self : Union[str, Any] , **kwargs : Optional[int] ):
        """simple docstring"""
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F'''A configuration of type {self.model_type} cannot be instantiated because '''
                F'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' )
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def _a ( cls : Union[str, Any] , encoder_config : Dict , decoder_config : str , **kwargs : int ):
        """simple docstring"""
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def _a ( self : Optional[int] ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
"""simple docstring"""
A__ : Any = version.parse("1.11" )
@property
def _a ( self : int ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
return 1E-4
@property
def _a ( self : List[str] ):
"""simple docstring"""
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
"""simple docstring"""
    @property
    def _a ( self : List[str] ):
        """simple docstring"""
        common_inputs = OrderedDict()
        common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs
    def _a ( self : List[Any] , tokenizer : Any , batch_size : Optional[Any] = -1 , seq_length : Any = -1 , is_pair : Optional[Any] = False , framework : int = None , ):
        """simple docstring"""
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch , encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['input_ids'] = dummy_input.pop('input_ids' )
        common_inputs['attention_mask'] = dummy_input.pop('attention_mask' )
        common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class VisionEncoderDecoderOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
    def _a ( self : Optional[Any] , encoder_config : Tuple ):
        """simple docstring"""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def _a ( self : int , encoder_config : List[Any] , decoder_config : Union[str, Any] , feature : Any = "default" ):
        """simple docstring"""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
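# Usage sketch (checkpoint names are illustrative; in the upstream library the
# classmethod defined above is exposed as `from_encoder_decoder_configs`):
#   from transformers import AutoConfig
#   enc = AutoConfig.from_pretrained('google/vit-base-patch16-224-in21k' )
#   dec = AutoConfig.from_pretrained('gpt2' )
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(enc , dec )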
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline( Pipeline ):
"""simple docstring"""
    def _a ( self : Any , truncation : str=None , tokenize_kwargs : Dict=None , return_tensors : Any=None , **kwargs : str ):
        """simple docstring"""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params
    def _a ( self : Any , inputs : Dict , **tokenize_kwargs : Optional[Any] ):
        """simple docstring"""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _a ( self : List[Any] , model_inputs : Dict ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def _a ( self : Optional[Any] , model_outputs : List[Any] , return_tensors : str=False ):
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
return super().__call__(*_snake_case , **_snake_case )
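# Usage sketch (model name is illustrative; any encoder checkpoint works):
#   from transformers import pipeline
#   extractor = pipeline('feature-extraction' , model='distilbert-base-uncased' )
#   features = extractor('This is a test' )  # nested list, shape [1][num_tokens][hidden_size]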
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama'''] = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama_fast'''] = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_llama'''] = [
        '''LlamaForCausalLM''',
        '''LlamaModel''',
        '''LlamaPreTrainedModel''',
        '''LlamaForSequenceClassification''',
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
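# Behavior sketch: once the module is replaced by _LazyModule, `from ..llama import
# LlamaConfig` only imports the config file, while torch-heavy symbols such as
# LlamaForCausalLM are resolved on first attribute access.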
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy( preds , labels ) -> float:
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path : str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir : Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    task_name : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir : str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length : int = field(
        default=1_28 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main() -> Any:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ' --overwrite_output_dir to overcome.' )
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
    # Set seed
    set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
    return results
def _mp_fn( index ) -> None:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
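# Invocation sketch (task and data paths are illustrative; `processors` comes from
# utils_multiple_choice, which ships e.g. a `swag` task):
#   python run_multiple_choice.py --task_name swag --data_dir ./data/swag \
#       --model_name_or_path bert-base-uncased --output_dir ./out \
#       --do_train --do_eval --max_seq_length 128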
from __future__ import annotations
def bucket_sort( my_list: list ) -> list:
    """
    >>> bucket_sort([4, 5, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    if len(my_list ) == 0:
        return []
    min_value , max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets = [[] for _ in range(bucket_count )]
    for i in my_list:
        buckets[int(i - min_value )].append(i )
    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -1_0, 1_5, 2, -2]) == [-1_0, -2, 0, 1, 2, 1_5]
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
        '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MraForMaskedLM''',
        '''MraForMultipleChoice''',
        '''MraForQuestionAnswering''',
        '''MraForSequenceClassification''',
        '''MraForTokenClassification''',
        '''MraLayer''',
        '''MraModel''',
        '''MraPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig( PretrainedConfig ):
"""simple docstring"""
A__ : Optional[Any] = "ibert"
    def __init__( self : Optional[Any] , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig( OnnxConfig ):
"""simple docstring"""
    @property
    def _a ( self : Tuple ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
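# The dynamic axes above keep batch (and, for multiple-choice, the choice dim)
# symbolic in the exported ONNX graph; a fixed-shape export would otherwise pin
# input_ids/attention_mask to the tracing example's dimensions.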
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100'
SCREAMING_SNAKE_CASE__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
SCREAMING_SNAKE_CASE__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
A__ : Any = IFInpaintingPipeline
A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
def _a ( self : Any ):
"""simple docstring"""
return self._get_dummy_components()
    def _a ( self : Optional[int] , device : Any , seed : int=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self : Optional[int] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
self._test_save_load_local()
def _a ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
'''simple docstring'''
from __future__ import annotations
def prime_factors( n: int ) -> list[int]:
    """
    >>> prime_factors(360)
    [2, 2, 2, 3, 3, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class FlaxLogitsProcessor :
"""simple docstring"""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class FlaxLogitsWarper :
"""simple docstring"""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class FlaxLogitsProcessorList( list ):
"""simple docstring"""
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self : Any , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int , **kwargs : Optional[int] ):
        """simple docstring"""
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
                        F'''{processor.__class__} are passed to the logits processor.''' )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class FlaxTemperatureLogitsWarper( FlaxLogitsWarper ):
"""simple docstring"""
    def __init__( self : Any , temperature : float ):
        """simple docstring"""
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
        self.temperature = temperature
    def __call__( self : str , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        """simple docstring"""
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper( FlaxLogitsWarper ):
"""simple docstring"""
    def __init__( self : Optional[Any] , top_p : float , filter_value : float = -float('Inf' ) , min_tokens_to_keep : int = 1 ):
        """simple docstring"""
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__( self : str , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        """simple docstring"""
        topk_scores , topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
        return next_scores
class FlaxTopKLogitsWarper( FlaxLogitsWarper ):
"""simple docstring"""
    def __init__( self : Union[str, Any] , top_k : int , filter_value : float = -float('Inf' ) , min_tokens_to_keep : int = 1 ):
        """simple docstring"""
        if not isinstance(top_k , int ) or top_k <= 0:
            raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
        self.top_k = max(top_k , min_tokens_to_keep )
        self.filter_value = filter_value
    def __call__( self : Optional[Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        """simple docstring"""
        batch_size , vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size , self.filter_value )
        topk = min(self.top_k , scores.shape[-1] )  # Safety check
        topk_scores , topk_indices = lax.top_k(scores , topk )
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size , vocab_size )
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
    def __init__( self : Any , bos_token_id : int ):
        """simple docstring"""
        self.bos_token_id = bos_token_id
    def __call__( self : Optional[int] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        """simple docstring"""
        new_scores = jnp.full(scores.shape , -float('inf' ) )
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.bos_token_id].set(0 ) , scores )
        return scores
class FlaxForcedEOSTokenLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
    def __init__( self : Any , max_length : int , eos_token_id : int ):
        """simple docstring"""
        self.max_length = max_length
        self.eos_token_id = eos_token_id
    def __call__( self : List[Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        """simple docstring"""
        new_scores = jnp.full(scores.shape , -float('inf' ) )
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.eos_token_id].set(0 ) , scores )
        return scores
class FlaxMinLengthLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
    def __init__( self : Dict , min_length : int , eos_token_id : int ):
        """simple docstring"""
        if not isinstance(min_length , int ) or min_length < 0:
            raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
        if not isinstance(eos_token_id , int ) or eos_token_id < 0:
            raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
        self.min_length = min_length
        self.eos_token_id = eos_token_id
    def __call__( self : int , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        """simple docstring"""
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        scores = jnp.where(apply_penalty , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , scores )
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
    def __init__( self : int , begin_suppress_tokens : Tuple , begin_index : Union[str, Any] ):
        """simple docstring"""
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index
    def __call__( self : Union[str, Any] , input_ids : Optional[Any] , scores : str , cur_len : int ):
        """simple docstring"""
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , scores )
        return scores
class FlaxSuppressTokensLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
    def __init__( self : int , suppress_tokens : list ):
        """simple docstring"""
        self.suppress_tokens = list(suppress_tokens )
    def __call__( self : List[Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        """simple docstring"""
        scores = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
        return scores
class FlaxForceTokensLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
    def __init__( self : List[str] , force_token_map : Optional[Any] ):
        """simple docstring"""
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )
    def __call__( self : List[Any] , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        """simple docstring"""
        def _force_token(generation_idx : Dict ):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float('inf' )
            updates = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updates , (0, current_token) )
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(cur_len ) , lambda: scores , ) , )
        return scores
class FlaxWhisperTimeStampLogitsProcessor( FlaxLogitsProcessor ):
"""simple docstring"""
    def __init__( self : Union[str, Any] , generate_config : Union[str, Any] , model_config : Dict , decoder_input_length : List[Any] ):
        """simple docstring"""
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config , 'max_initial_timestamp_index' ):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__( self : Tuple , input_ids : List[Any] , scores : Dict , cur_len : Dict ):
        """simple docstring"""
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
        def handle_pairs(input_ids_k : Dict , scores_k : str ):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1 , True , False )
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , False , )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2 , True , False )
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , True , penultimate_was_timestamp , )
            return jnp.where(
                last_was_timestamp , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , scores_k , )
        scores = jax.vmap(handle_pairs )(input_ids , scores )
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index , True , False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , False , )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , scores , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores , axis=-1 )
        def handle_cumulative_probs(logprobs_k : List[Any] , scores_k : Union[str, Any] ):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , scores_k , )
        scores = jax.vmap(handle_cumulative_probs )(logprobs , scores )
        return scores
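# Composition sketch of the warpers above (values illustrative; this inlines the
# same math FlaxLogitsProcessorList applies inside generation):
#   import jax.numpy as jnp
#   from jax import lax
#   scores = jnp.array([[2.0, 0.5, 1.0, 3.0]] )
#   scores = scores / 0.7  # temperature < 1 sharpens the distribution
#   topk_scores , topk_indices = lax.top_k(scores , 2 )  # keep the 2 best logits
#   filtered = jnp.full_like(scores , -jnp.inf ).at[0, topk_indices[0]].set(topk_scores[0] )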
from ....configuration_utils import PretrainedConfig
from ....utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# TODO: upload to AWS
SCREAMING_SNAKE_CASE__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig( PretrainedConfig ):
"""simple docstring"""
A__ : Any = "retribert"
    def __init__( self : str , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , share_encoders=True , projection_dim=1_28 , pad_token_id=0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
import argparse
import struct
import unittest
class SHAaaa :
"""simple docstring"""
    def __init__( self : List[str] , data : bytes ):
        """simple docstring"""
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
        # Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data : bytes ):
        """simple docstring"""
        padding = B'\x80' + (B'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self : Optional[int] ):
        """simple docstring"""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self : Dict , value : int , rotations : int ):
        """simple docstring"""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaTestCase ( unittest.TestCase ):
"""simple docstring"""
    def _a ( self : str ):
        """simple docstring"""
        import hashlib
        data = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(data ).hash , hashlib.sha256(data ).hexdigest() )
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
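# Sanity check against the standard SHA-256 test vector:
#   SHAaaa(b'abc').hash ==
#   'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'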
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="attention" ) -> Tuple:
A__ = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
A__ = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
A__ = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
A__ = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
return k, o, q, v
def tax_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
if split_mlp_wi:
A__ = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
A__ = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
A__ = (wi_a, wi_a)
else:
A__ = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
A__ = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer norm scale of a layer."""
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables['target'])
    old = {'/'.join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/layers_0/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm')
        k, o, q, v = t5x_attention_lookup(old, i, 'encoder', 'attention')
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm')
        wi, wo = t5x_mlp_lookup(old, i, 'encoder', split_mlp_wi)
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
    new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
        'encoder/relpos_bias/rel_embedding'
    ].T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'self_attention')
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention')
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm')
            wi, wo = t5x_mlp_lookup(old, i, 'decoder', split_mlp_wi)
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
            'decoder/relpos_bias/rel_embedding'
        ].T
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if 'decoder/logits_dense/kernel' in old:
        new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict['lm_head.weight'] = state_dict['shared.weight']
    return state_dict
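# Note: v1.0 T5 checkpoints tie the LM head and the input embeddings, which is
# why `shared.weight` is reused above whenever the encoder/decoder
# `embed_tokens` weights or `lm_head.weight` are missing.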
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in `model` with the converted T5X parameters."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
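# Example invocation (paths are placeholders; the flags are defined by the
# argument parser below):
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path ./t5-pytorch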
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 708
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
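# Derivative convention (illustrative check): `deriv=True` expects a sigmoid
# *output* s, since d/dx sigmoid(x) = s * (1 - s).
# >>> s = sigmoid_function(0.0)        # 0.5
# >>> sigmoid_function(s, deriv=True)  # 0.25, the sigmoid's maximum slope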
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight by repeated forward/backward passes and return the final output scaled to [0, 100]."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
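# Behavioural note (illustrative): each update nudges `weight` so that
# sigmoid_function(INITIAL_VALUE * weight) * 100 moves towards `expected`;
# with enough propagations the returned value converges on `expected`.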
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
| 52
| 0
|
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, 'utils', 'tf_ops', 'onnx.json')) as f:
        onnx_opsets = json.load(f)['opsets']
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, 'rb') as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f'Found the following incompatible ops for the opset {opset}:\n' + '\n'.join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'Found the following incompatible ops for the opset {opset}:')
        print(*incompatible_ops, sep='\n')
    else:
        print(f'The saved model {saved_model_path} can properly be converted with ONNX.')
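# Example invocation (script name and model path are placeholders; the flags
# are defined by the argument parser below):
# python check_tf_ops.py --saved_model_path /path/to/saved_model.pb --opset 12 --strict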
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 709
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    """Integration test for the Flax MT5 model."""
@slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        labels = tokenizer('Hi I am', return_tensors='np').input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
| 52
| 0
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('.')[0].split(lora_prefix_text_encoder + '_')[-1].split('_')
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.')[0].split(lora_prefix_unet + '_')[-1].split('_')
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += '_' + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down', 'lora_up'))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace('lora_up', 'lora_down'))
        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list
        for item in pair_keys:
            visited.append(item)
    return pipeline
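# The update applied above is, per LoRA pair (cf. the `--alpha` help text
# below, W = W0 + alpha * deltaW):
#     W <- W + alpha * (weight_up @ weight_down)
# Minimal standalone sketch of the same arithmetic (shapes illustrative):
# W = torch.zeros(8, 8)
# up, down = torch.randn(8, 4), torch.randn(4, 8)
# W += 0.75 * torch.mm(up, down)  # alpha = 0.75 is the parser's default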
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 710
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RoBERTa model."""
    model_type = 'roberta'
    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa."""
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
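# Sketch of how an OnnxConfig subclass like the one above is typically
# consumed by the transformers export utilities (instance names illustrative):
# from pathlib import Path
# from transformers.onnx import export
# onnx_config = RobertaOnnxConfig(model.config)
# export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path('model.onnx'))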
| 52
| 0
|
SCREAMING_SNAKE_CASE__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 711
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow and fast Longformer (RoBERTa-style BPE) tokenizers."""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_snake_case ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']), sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
A__ = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
| 52
| 0
|
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    """Builds small random configs and inputs for the LiLT model tests below."""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for LiLT."""
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    """Integration test running a pretrained LiLT checkpoint."""
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 712
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
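# With the collection hook above, any test not explicitly marked ends up in
# the "unit" suite; opting out is a one-liner (illustrative):
# @pytest.mark.integration
# def test_load_remote_dataset():
#     ...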
| 52
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 713
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Computes WER/CER and writes the results (and optionally all outputs) to text files."""
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)
    with open(f'{dataset_id}_eval_results.txt', 'w') as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'{i}' + '\n')
                t.write(batch['target'] + '\n')
            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize a transcription to the training-time target format."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text
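# Example of the normalization above (illustrative):
# normalize_text('Hello, World!')  ->  'hello world'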
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)
    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
    main(args)
| 52
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a NAT (Neighborhood Attention Transformer) model."""
    model_type = 'nat'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 714
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> str:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
A__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[: config.hidden_size, :]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[-config.hidden_size :, :]
A__ = in_proj_bias[-config.hidden_size :]
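# Hedged sketch (editor's addition, not in the original script): the slicing above splits
# timm's fused (3*d, d) qkv weight into query/key/value projections; `_demo_split_qkv` is a
# hypothetical helper that mirrors it.
def _demo_split_qkv(qkv_weight, hidden_size):
    query = qkv_weight[: hidden_size, :]  # rows [0, d)
    key = qkv_weight[hidden_size : hidden_size * 2, :]  # rows [d, 2d)
    value = qkv_weight[-hidden_size:, :]  # rows [2d, 3d)
    return query, key, value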
def A ( __UpperCamelCase ) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def A ( __UpperCamelCase , __UpperCamelCase ) -> dict:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(__UpperCamelCase )
if "qkv" in key:
A__ = key.split('.' )
A__ = int(key_split[2] )
A__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
else:
A__ = val
return orig_state_dict
def A ( ) -> torch.Tensor:
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[str]:
A__ = get_yolos_config(__UpperCamelCase )
# load original state_dict
A__ = torch.load(__UpperCamelCase , map_location='cpu' )['model']
# load 🤗 model
A__ = YolosForObjectDetection(__UpperCamelCase )
model.eval()
A__ = convert_state_dict(__UpperCamelCase , __UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
A__ = 800 if yolos_name != 'yolos_ti' else 512
A__ = YolosImageProcessor(format='coco_detection' , size=__UpperCamelCase )
A__ = image_processor(images=prepare_img() , return_tensors='pt' )
A__ = model(**__UpperCamelCase )
A__ , A__ = outputs.logits, outputs.pred_boxes
A__ , A__ = None, None
if yolos_name == "yolos_ti":
A__ = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
A__ = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
A__ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
A__ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
A__ = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
A__ = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
A__ = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
A__ = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
A__ = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
A__ = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(f'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1E-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if push_to_hub:
A__ = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
A__ = model_mapping[yolos_name]
image_processor.push_to_hub(__UpperCamelCase , organization='hustvl' )
model.push_to_hub(__UpperCamelCase , organization='hustvl' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_yolos_checkpoint(SCREAMING_SNAKE_CASE__.yolos_name, SCREAMING_SNAKE_CASE__.checkpoint_path, SCREAMING_SNAKE_CASE__.pytorch_dump_folder_path, SCREAMING_SNAKE_CASE__.push_to_hub)
| 52
| 0
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class __lowerCAmelCase ( lowercase__ ):
"""simple docstring"""
A__ : str = """align_text_model"""
def __init__( self : List[Any] , _snake_case : str=3_05_22 , _snake_case : Optional[int]=7_68 , _snake_case : Union[str, Any]=12 , _snake_case : List[str]=12 , _snake_case : List[str]=30_72 , _snake_case : Optional[int]="gelu" , _snake_case : Optional[Any]=0.1 , _snake_case : int=0.1 , _snake_case : Optional[int]=5_12 , _snake_case : Tuple=2 , _snake_case : str=0.02 , _snake_case : str=1E-12 , _snake_case : int=0 , _snake_case : int="absolute" , _snake_case : Any=True , **_snake_case : List[str] , ):
"""simple docstring"""
super().__init__(**__lowercase )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = pad_token_id
@classmethod
def _a ( cls : Optional[int] , _snake_case : Union[str, os.PathLike] , **_snake_case : List[str] ):
"""simple docstring"""
cls._set_token_in_kwargs(__lowercase )
A__ , A__ = cls.get_config_dict(__lowercase , **__lowercase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
A__ = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowercase , **__lowercase )
class __lowerCAmelCase ( lowercase__ ):
"""simple docstring"""
A__ : str = """align_vision_model"""
def __init__( self : Union[str, Any] , _snake_case : int = 3 , _snake_case : int = 6_00 , _snake_case : float = 2.0 , _snake_case : float = 3.1 , _snake_case : int = 8 , _snake_case : List[int] = [3, 3, 5, 3, 5, 5, 3] , _snake_case : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , _snake_case : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , _snake_case : List[int] = [] , _snake_case : List[int] = [1, 2, 2, 2, 1, 2, 1] , _snake_case : List[int] = [1, 2, 2, 3, 3, 4, 1] , _snake_case : List[int] = [1, 6, 6, 6, 6, 6, 6] , _snake_case : float = 0.25 , _snake_case : str = "swish" , _snake_case : int = 25_60 , _snake_case : str = "mean" , _snake_case : float = 0.02 , _snake_case : float = 0.001 , _snake_case : float = 0.99 , _snake_case : float = 0.2 , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**__lowercase )
A__ = num_channels
A__ = image_size
A__ = width_coefficient
A__ = depth_coefficient
A__ = depth_divisor
A__ = kernel_sizes
A__ = in_channels
A__ = out_channels
A__ = depthwise_padding
A__ = strides
A__ = num_block_repeats
A__ = expand_ratios
A__ = squeeze_expansion_ratio
A__ = hidden_act
A__ = hidden_dim
A__ = pooling_type
A__ = initializer_range
A__ = batch_norm_eps
A__ = batch_norm_momentum
A__ = drop_connect_rate
A__ = sum(__lowercase ) * 4
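# Editor's note: num_hidden_layers is derived above as sum(num_block_repeats) * 4; with the
# default repeats [1, 2, 2, 3, 3, 4, 1] that is 16 * 4 = 64 (the factor of 4 presumably
# counts the sub-layers of each repeated EfficientNet block).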
@classmethod
def _a ( cls : Optional[Any] , _snake_case : Union[str, os.PathLike] , **_snake_case : Dict ):
"""simple docstring"""
cls._set_token_in_kwargs(__lowercase )
A__ , A__ = cls.get_config_dict(__lowercase , **__lowercase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
A__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__lowercase , **__lowercase )
class __lowerCAmelCase ( lowercase__ ):
"""simple docstring"""
A__ : Union[str, Any] = """align"""
A__ : Optional[Any] = True
def __init__( self : List[Any] , _snake_case : int=None , _snake_case : Tuple=None , _snake_case : Any=6_40 , _snake_case : Tuple=1.0 , _snake_case : Optional[int]=0.02 , **_snake_case : str , ):
"""simple docstring"""
super().__init__(**__lowercase )
if text_config is None:
A__ = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
A__ = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
A__ = AlignTextConfig(**__lowercase )
A__ = AlignVisionConfig(**__lowercase )
A__ = projection_dim
A__ = temperature_init_value
A__ = initializer_range
@classmethod
def _a ( cls : Tuple , _snake_case : AlignTextConfig , _snake_case : AlignVisionConfig , **_snake_case : int ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowercase )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = copy.deepcopy(self.__dict__ )
A__ = self.text_config.to_dict()
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
| 715
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
SCREAMING_SNAKE_CASE__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Tuple ):
"""simple docstring"""
A__ = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
A__ = {
"input_ids": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
A__ = model(A_ )["last_hidden_state"]
A__ = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
A__ = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 716
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE__ = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
SCREAMING_SNAKE_CASE__ = {
'''google/rembert''': 2_5_6,
}
SCREAMING_SNAKE_CASE__ = '''▁'''
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Any = VOCAB_FILES_NAMES
A__ : str = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : int = RemBertTokenizer
def __init__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Any=True , _snake_case : Optional[int]=True , _snake_case : Dict=False , _snake_case : Dict="[CLS]" , _snake_case : List[Any]="[SEP]" , _snake_case : Union[str, Any]="<unk>" , _snake_case : List[str]="[SEP]" , _snake_case : List[str]="<pad>" , _snake_case : str="[CLS]" , _snake_case : Any="[MASK]" , **_snake_case : Any , ):
"""simple docstring"""
A__ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , do_lower_case=_snake_case , remove_space=_snake_case , keep_accents=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = False if not self.vocab_file else True
def _a ( self : Any , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
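# Editor's illustration: a single sequence becomes [CLS] A [SEP]; a pair becomes
# [CLS] A [SEP] B [SEP], matching the BERT-style layout RemBERT expects.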
def _a ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_snake_case )) + [1] + ([0] * len(_snake_case )) + [1]
return [1] + ([0] * len(_snake_case )) + [1]
def _a ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
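# Editor's illustration: the segment ids cover "[CLS] A [SEP]" with 0s and "B [SEP]" with
# 1s, e.g. len(A)=3 and len(B)=2 yield [0, 0, 0, 0, 0, 1, 1, 1].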
def _a ( self : Any , _snake_case : str , _snake_case : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_snake_case ):
logger.error('Vocabulary path ({}) should be a directory'.format(_snake_case ) )
return
A__ = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file , _snake_case )
return (out_vocab_file,)
| 52
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
A__ : List[Any] = KandinskyImgaImgPipeline
A__ : Union[str, Any] = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
A__ : List[Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
A__ : Any = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
A__ : Union[str, Any] = False
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
return 32
@property
def _a ( self : int ):
"""simple docstring"""
return 32
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def _a ( self : Tuple ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
return 1_00
@property
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
A__ = MultilingualCLIP(_snake_case )
A__ = text_encoder.eval()
return text_encoder
@property
def _a ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
A__ = UNetaDConditionModel(**_snake_case )
return model
@property
def _a ( self : int ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.dummy_text_encoder
A__ = self.dummy_tokenizer
A__ = self.dummy_unet
A__ = self.dummy_movq
A__ = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
A__ = DDIMScheduler(**_snake_case )
A__ = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _a ( self : int , _snake_case : Tuple , _snake_case : Dict=0 ):
"""simple docstring"""
A__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_snake_case )
# create init_image
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((2_56, 2_56) )
if str(_snake_case ).startswith('mps' ):
A__ = torch.manual_seed(_snake_case )
else:
A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A__ = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def _a ( self : str ):
"""simple docstring"""
A__ = 'cpu'
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**_snake_case )
A__ = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
A__ = pipe(**self.get_dummy_inputs(_snake_case ) )
A__ = output.images
A__ = pipe(
**self.get_dummy_inputs(_snake_case ) , return_dict=_snake_case , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Tuple ):
"""simple docstring"""
A__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
A__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
A__ = 'A red cartoon frog, 4k'
A__ = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
A__ = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
A__ = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
A__ = torch.Generator(device='cpu' ).manual_seed(0 )
A__ , A__ = pipe_prior(
_snake_case , generator=_snake_case , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
A__ = pipeline(
_snake_case , image=_snake_case , image_embeds=_snake_case , negative_image_embeds=_snake_case , generator=_snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='np' , )
A__ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_snake_case , _snake_case )
| 717
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ = '''sshleifer/bart-tiny-random'''
SCREAMING_SNAKE_CASE__ = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Optional[int] ):
"""simple docstring"""
return AutoConfig.from_pretrained(_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
def _a ( self : int ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=_snake_case )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _a ( self : str ):
"""simple docstring"""
A__ , *A__ = create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _a ( self : str ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
create_student_by_copying_alternating_layers(_snake_case , tempfile.mkdtemp() , e=_snake_case , d=_snake_case )
| 52
| 0
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = BlenderbotSmallTokenizer
A__ : List[Any] = False
def _a ( self : int ):
"""simple docstring"""
super().setUp()
A__ = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
A__ = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
A__ = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
A__ = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase__ ) )
def _a ( self : Tuple , **_snake_case : List[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _a ( self : Any , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = "adapt act apte"
A__ = "adapt act apte"
return input_text, output_text
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
A__ = "adapt act apte"
A__ = ["adapt", "act", "ap@@", "te"]
A__ = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
A__ = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
A__ = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
assert tok('sam' ).input_ids == [13_84]
A__ = "I am a small frog."
A__ = tok([src_text] , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )["input_ids"]
A__ = tok.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
A__ = "I am a small frog ."
A__ = "."
A__ = tok(lowerCamelCase__ )["input_ids"]
A__ = tok(lowerCamelCase__ )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 718
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : Union[str, Any] = ["image_processor", "tokenizer"]
A__ : Optional[Any] = "BridgeTowerImageProcessor"
A__ : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case )
def __call__( self : List[Any] , _snake_case : int , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
A__ = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
# add pixel_values + pixel_mask
A__ = self.image_processor(
_snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case )
encoding.update(_snake_case )
return encoding
def _a ( self : Any , *_snake_case : Tuple , **_snake_case : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _a ( self : Dict , *_snake_case : Dict , **_snake_case : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _a ( self : Tuple ):
"""simple docstring"""
A__ = self.tokenizer.model_input_names
A__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 52
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
@dataclass
class __lowerCAmelCase ( __a ):
"""simple docstring"""
A__ : List[Any] = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : int , **_snake_case : int ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
A__ = deprecated_arg[3:]
setattr(self , lowerCAmelCase_ , not kwargs.pop(lowerCAmelCase_ ) )
logger.warning(
F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
A__ = kwargs.pop('torchscript' , self.torchscript )
A__ = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics )
A__ = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level )
super().__init__(**lowerCAmelCase_ )
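# Editor's note: each deprecated "no_*" flag is flipped onto its positive counterpart, e.g.
# no_cuda=True pops the kwarg and sets self.cuda = False before the parent __init__ runs.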
A__ : bool = field(default=__a , metadata={"help": "Trace the models using torchscript"} )
A__ : bool = field(default=__a , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
A__ : str = field(
default="O1" , metadata={
"help": (
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
} , )
@cached_property
def _a ( self : Any ):
"""simple docstring"""
requires_backends(self , ['torch'] )
logger.info('PyTorch: setting up devices' )
if not self.cuda:
A__ = torch.device('cpu' )
A__ = 0
elif is_torch_tpu_available():
A__ = xm.xla_device()
A__ = 0
else:
A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
A__ = torch.cuda.device_count()
return device, n_gpu
@property
def _a ( self : str ):
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def _a ( self : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ['torch'] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def _a ( self : Dict ):
"""simple docstring"""
requires_backends(self , ['torch'] )
return self._setup_devices[0]
@property
def _a ( self : Tuple ):
"""simple docstring"""
requires_backends(self , ['torch'] )
return self._setup_devices[1]
@property
def _a ( self : str ):
"""simple docstring"""
return self.n_gpu > 0
| 719
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 52
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class __lowerCAmelCase ( __lowercase ):
"""simple docstring"""
A__ : Dict = '''openai-gpt'''
A__ : Any = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[Any] , _snake_case : str=4_04_78 , _snake_case : str=5_12 , _snake_case : Any=7_68 , _snake_case : Any=12 , _snake_case : Any=12 , _snake_case : Optional[Any]="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : List[str]=0.1 , _snake_case : Any=0.1 , _snake_case : Dict=1E-5 , _snake_case : Optional[int]=0.02 , _snake_case : List[Any]="cls_index" , _snake_case : List[str]=True , _snake_case : Dict=None , _snake_case : Optional[Any]=True , _snake_case : Any=0.1 , **_snake_case : Tuple , ):
"""simple docstring"""
A__ = vocab_size
A__ = n_positions
A__ = n_embd
A__ = n_layer
A__ = n_head
A__ = afn
A__ = resid_pdrop
A__ = embd_pdrop
A__ = attn_pdrop
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = summary_type
A__ = summary_use_proj
A__ = summary_activation
A__ = summary_first_dropout
A__ = summary_proj_to_labels
super().__init__(**__a )
| 720
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def A ( __UpperCamelCase ) -> Tuple:
    if "token" in __UpperCamelCase:
        return "rag_token"
    if "sequence" in __UpperCamelCase:
        return "rag_sequence"
    if "bart" in __UpperCamelCase:
        return "bart"
    return None
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return max(metric_fn(__UpperCamelCase , __UpperCamelCase ) for gt in ground_truths )
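# Editor's illustration: the metric is taken as the best score over all acceptable answers,
# e.g. exact_match_score against ground_truths ["Paris", "France"] returns 1 if the
# prediction matches either reference.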
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = []
if args.gold_data_mode == "qa":
A__ = pd.read_csv(__UpperCamelCase , sep='\t' , header=__UpperCamelCase )
for answer_list in data[1]:
A__ = ast.literal_eval(__UpperCamelCase )
answers.append(__UpperCamelCase )
else:
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [[reference] for reference in references]
A__ = A__ = A__ = 0
for prediction, ground_truths in zip(__UpperCamelCase , __UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
fa += metric_max_over_ground_truths(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A__ = 100.0 * em / total
A__ = 100.0 * fa / total
logger.info(f'''F1: {fa:.2f}''' )
logger.info(f'''EM: {em:.2f}''' )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
A__ = args.k
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = [line.strip() for line in open(__UpperCamelCase , 'r' ).readlines()]
A__ = A__ = 0
for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ):
A__ = set(hypo.split('\t' )[:k] )
A__ = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
A__ = 100.0 * em / total
logger.info(f'''Precision@{k}: {em: .2f}''' )
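# Editor's illustration: with k=2, a hypothesis "Doc1\tDoc3" against the reference
# "Doc1\tDoc2\tDoc3" shares both of its top-2 titles, contributing 2 / 2 = 1.0 to em.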
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
def strip_title(title ):
    if title.startswith('"' ):
        title = title[1:]
    if title.endswith('"' ):
        title = title[:-1]
    return title
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase , )['input_ids'].to(args.device )
A__ = rag_model.rag.question_encoder(__UpperCamelCase )
A__ = question_enc_outputs[0]
A__ = rag_model.retriever(
__UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
A__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
A__ = []
for docs in all_docs:
A__ = [strip_title(__UpperCamelCase ) for title in docs['title']]
provenance_strings.append('\t'.join(__UpperCamelCase ) )
return provenance_strings
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]:
with torch.no_grad():
A__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors='pt' , padding=__UpperCamelCase , truncation=__UpperCamelCase )
A__ = inputs_dict.input_ids.to(args.device )
A__ = inputs_dict.attention_mask.to(args.device )
A__ = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
A__ = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
logger.info('Q: {} - A: {}'.format(__UpperCamelCase , __UpperCamelCase ) )
return answers
def A ( ) -> Any:
A__ = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=__UpperCamelCase , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=__UpperCamelCase , choices=['exact', 'compressed', 'legacy'] , type=__UpperCamelCase , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=__UpperCamelCase , type=__UpperCamelCase , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=__UpperCamelCase , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=__UpperCamelCase , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=__UpperCamelCase , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=__UpperCamelCase , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=__UpperCamelCase , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=__UpperCamelCase , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=__UpperCamelCase , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=__UpperCamelCase , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=__UpperCamelCase , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
A__ = parser.parse_args()
A__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def A ( __UpperCamelCase ) -> int:
A__ = {}
if args.model_type is None:
A__ = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
A__ = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
A__ = args.n_docs
if args.index_name is not None:
A__ = args.index_name
if args.index_path is not None:
A__ = args.index_path
else:
A__ = BartForConditionalGeneration
A__ = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , __UpperCamelCase )
A__ = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
A__ = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(__UpperCamelCase ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
A__ = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
A__ = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase )
model.retriever.init_retrieval()
else:
A__ = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
A__ = []
for line in tqdm(__UpperCamelCase ):
questions.append(line.strip() )
if len(__UpperCamelCase ) == args.eval_batch_size:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) + '\n' )
preds_file.flush()
A__ = []
if len(__UpperCamelCase ) > 0:
A__ = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write('\n'.join(__UpperCamelCase ) )
preds_file.flush()
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_args()
main(SCREAMING_SNAKE_CASE__)
| 52
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def A ( __UpperCamelCase ) -> Optional[int]:
    A__ = __UpperCamelCase.pruning_method
    A__ = __UpperCamelCase.threshold
    A__ = __UpperCamelCase.model_name_or_path.rstrip('/' )
    A__ = __UpperCamelCase.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
A__ = torch.load(os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) )
A__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
A__ = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
A__ = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
A__ = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
A__ = MagnitudeBinarizer.apply(inputs=_UpperCamelCase , threshold=_UpperCamelCase )
A__ = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f'''{prefix_}mask_scores''']
A__ = TopKBinarizer.apply(_UpperCamelCase , _UpperCamelCase )
A__ = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f'''{prefix_}mask_scores''']
A__ = ThresholdBinarizer.apply(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
A__ = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f'''{prefix_}mask_scores''']
A__ , A__ = -0.1, 1.1
A__ = torch.sigmoid(_UpperCamelCase )
A__ = s * (r - l) + l
A__ = s_bar.clamp(min=0.0 , max=1.0 )
A__ = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
A__ = os.path.join(
os.path.dirname(_UpperCamelCase ) , f'''bertarized_{os.path.basename(_UpperCamelCase )}''' )
if not os.path.isdir(_UpperCamelCase ):
shutil.copytree(_UpperCamelCase , _UpperCamelCase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(_UpperCamelCase , os.path.join(_UpperCamelCase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
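# Hedged sketch (editor's addition): the "l0" branch above applies the hard-concrete
# stretch used in L0 regularization; `_demo_l0_mask` is a hypothetical helper mirroring it.
def _demo_l0_mask(mask_scores):
    l, r = -0.1, 1.1  # stretch interval endpoints
    s = torch.sigmoid(mask_scores)
    s_bar = s * (r - l) + l  # stretch the sigmoid output from (0, 1) to (l, r)
    return s_bar.clamp(min=0.0, max=1.0)  # clamp back into [0, 1] to get the mask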
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder to which the pruned model will be saved; if None, a `bertarized_` folder is created next to the original model''',
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(SCREAMING_SNAKE_CASE__)
| 721
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A__ = (self.image_size // 32) ** 2
A__ = num_patches + 1
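# Editor's illustration: with the default image_size=64 and the backbone's output stride of
# 32, the feature map is 2x2, so num_patches = (64 // 32) ** 2 = 4 and seq_length = 4 + 1 = 5.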
def _a ( self : Any ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Tuple ):
"""simple docstring"""
A__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
A__ = self.type_sequence_label_size
A__ = ViTHybridForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : str = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Union[str, Any] = False
A__ : Any = False
A__ : Union[str, Any] = False
def _a ( self : Dict ):
"""simple docstring"""
A__ = ViTHybridModelTester(self )
A__ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _a ( self : List[str] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(_snake_case )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def _a ( self : Any ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
A__ = model_class(config=_snake_case )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
A__ = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
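    # Note on the check above (explanatory, not original code): _config_zero_init
    # shrinks initializer_range to ~0, so normally-initialized weights have means
    # that round to 0.0, while LayerNorm/scale parameters keep their constant init
    # of 1.0; multiplying by 1e9 before rounding just tolerates sub-1e-9 noise.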
@slow
def _a ( self : int ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTHybridModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A ( ) -> Union[str, Any]:
A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_snake_case )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
A__ = model(**_snake_case )
# verify the logits
A__ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _snake_case )
A__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
A__ = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
A__ = prepare_img()
A__ = image_processor(images=_snake_case , return_tensors='pt' )
A__ = model(**_snake_case )
A__ = outputs.logits
# model predicts one of the 1000 ImageNet classes
A__ = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 52
| 0
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( __a , unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = None
A__ : List[Any] = BloomTokenizerFast
A__ : Optional[Any] = BloomTokenizerFast
A__ : Dict = True
A__ : Optional[Any] = False
A__ : Tuple = "tokenizer_file"
A__ : Union[str, Any] = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def _a ( self : Dict ):
"""simple docstring"""
super().setUp()
A__ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : Optional[int] , **_snake_case : List[str] ):
"""simple docstring"""
        _snake_case.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = self.get_rust_tokenizer()
A__ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
A__ = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
A__ = tokenizer.batch_encode_plus(A__ )['input_ids']
self.assertListEqual(A__ , A__ )
A__ = tokenizer.batch_decode(A__ )
self.assertListEqual(A__ , A__ )
    def _a ( self : Union[str, Any] , max_length : int=6 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                simple_input = 'This is a simple input'
                simple_inputs = ['This is a simple input 1', 'This is a simple input 2']
                simple_pair = ('This is a simple input', 'This is a pair')
                simple_pairs = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(simple_input , max_length=max_length )
                    tokenizer_r.encode_plus(simple_input , max_length=max_length )
                    tokenizer_r.batch_encode_plus(simple_inputs , max_length=max_length )
                    tokenizer_r.encode(simple_pair , max_length=max_length )
                    tokenizer_r.batch_encode_plus(simple_pairs , max_length=max_length )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , simple_input , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , simple_input , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , simple_inputs , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , simple_pair , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , simple_pair , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , simple_pairs , max_length=max_length , padding='max_length' , )
def _a ( self : str ):
"""simple docstring"""
A__ = self.get_rust_tokenizer()
A__ = load_dataset('xnli' , 'all_languages' , split='test' , streaming=A__ )
A__ = next(iter(A__ ) )['premise'] # pick up one data
A__ = list(sample_data.values() )
A__ = list(map(tokenizer.encode , A__ ) )
A__ = [tokenizer.decode(A__ , clean_up_tokenization_spaces=A__ ) for x in output_tokens]
self.assertListEqual(A__ , A__ )
def _a ( self : int ):
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 700
|
def A ( number: int ) -> bool:
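    """
    Return True if ``number`` is even, False otherwise.

    Doctests (added for illustration so that ``doctest.testmod()`` below has
    something to check):

    >>> A(0)
    True
    >>> A(7)
    False
    >>> A(10)
    True
    """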
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 52
| 0
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
SCREAMING_SNAKE_CASE__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
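# Illustrative sketch of how the "*" wildcard in MAPPING is resolved (this mirrors
# the logic further below; the concrete key is an example, not original code):
#
#     name = 'encoder.layers.3.self_attn.k_proj.weight'
#     layer_index = name.split('self_attn.k_proj')[0].split('.')[-2]   # -> '3'
#     hf_key = 'encoder.layers.*.attention.k_proj'.replace('*', layer_index)
#     # hf_key == 'encoder.layers.3.attention.k_proj'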
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
for attribute in key.split('.' ):
A__ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
A__ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
A__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A__ = value
elif weight_type == "weight_g":
A__ = value
elif weight_type == "weight_v":
A__ = value
elif weight_type == "bias":
A__ = value
else:
A__ = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def A ( __UpperCamelCase , __UpperCamelCase ) -> Any:
A__ = []
A__ = fairseq_model.state_dict()
A__ = hf_model.feature_extractor
A__ = hf_model.adapter
for name, value in fairseq_dict.items():
A__ = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
A__ = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
A__ = True
if "*" in mapped_key:
A__ = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
A__ = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
A__ = 'weight_g'
elif "weight_v" in name:
A__ = 'weight_v'
elif "bias" in name:
A__ = 'bias'
elif "weight" in name:
A__ = 'weight'
else:
A__ = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
A__ = full_name.split('conv_layers.' )[-1]
A__ = name.split('.' )
A__ = int(items[0] )
A__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A__ = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
A__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]:
A__ = full_name.split('adaptor.' )[-1]
A__ = name.split('.' )
if items[1].isdigit():
A__ = int(items[1] )
else:
A__ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
A__ = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
A__ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
A__ = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
A__ = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
A__ = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
A__ = value
            logger.info(f'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
def A ( emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
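# Why this works (explanatory note, not original code): the MBart decoder ties its
# LM head to the token embedding matrix, so copying emb.weight into a bias-free
# nn.Linear of shape (vocab_size, emb_size) reproduces the tied lm_head exactly.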
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> List[str]:
A__ = WavaVecaConfig.from_pretrained(
_SCREAMING_SNAKE_CASE , add_adapter=_SCREAMING_SNAKE_CASE , adapter_stride=_SCREAMING_SNAKE_CASE , adapter_kernel_size=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , output_hidden_size=_SCREAMING_SNAKE_CASE , )
A__ = MBartConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
# load model
A__ , A__ , A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
A__ = model[0].eval()
# load feature extractor
A__ = WavaVecaFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE )
# set weights for wav2vec2 encoder
A__ = WavaVecaModel(_SCREAMING_SNAKE_CASE )
recursively_load_weights_wavaveca(model.encoder , _SCREAMING_SNAKE_CASE )
# load decoder weights
A__ = MBartForCausalLM(_SCREAMING_SNAKE_CASE )
A__ , A__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_SCREAMING_SNAKE_CASE )
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
A__ = SpeechEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
A__ = False
A__ = MBartaaTokenizer(_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
A__ = hf_wavavec.config.to_dict()
A__ = tokenizer.pad_token_id
A__ = tokenizer.bos_token_id
A__ = tokenizer.eos_token_id
A__ = 'mbart50'
A__ = 'wav2vec2'
A__ = tokenizer.eos_token_id
A__ = 250_004
A__ = tokenizer.eos_token_id
A__ = SpeechEncoderDecoderConfig.from_dict(_SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
    parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whether to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_0_2_4, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=2_5_0_0_0_4, type=int, help='''`decoder_start_token_id` of model config''')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 701
|
from typing import Dict
from .base import GenericTensor, Pipeline
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def _a ( self : Any , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Any=None , **_snake_case : str ):
"""simple docstring"""
if tokenize_kwargs is None:
A__ = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
A__ = truncation
A__ = tokenize_kwargs
A__ = {}
if return_tensors is not None:
A__ = return_tensors
return preprocess_params, {}, postprocess_params
def _a ( self : Any , _snake_case : Dict , **_snake_case : Optional[Any] ):
"""simple docstring"""
A__ = self.framework
A__ = self.tokenizer(_snake_case , return_tensors=_snake_case , **_snake_case )
return model_inputs
def _a ( self : List[Any] , _snake_case : Dict ):
"""simple docstring"""
A__ = self.model(**_snake_case )
return model_outputs
def _a ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : str=False ):
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Dict , *_snake_case : int , **_snake_case : List[str] ):
"""simple docstring"""
return super().__call__(*_snake_case , **_snake_case )
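# Minimal usage sketch (illustrative; the checkpoint name is an assumption, any
# encoder checkpoint works with this pipeline):
#
#     from transformers import pipeline
#     extractor = pipeline('feature-extraction', model='distilbert-base-uncased')
#     features = extractor('Hello world')   # nested list of shape [1, num_tokens, hidden_size]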
| 52
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __lowerCAmelCase ( __snake_case ):
A__ : List[str] = "vivit"
def __init__( self : Optional[Any] , _snake_case : int=2_24 , _snake_case : Any=32 , _snake_case : Optional[Any]=[2, 16, 16] , _snake_case : str=3 , _snake_case : str=7_68 , _snake_case : Optional[Any]=12 , _snake_case : Dict=12 , _snake_case : Dict=30_72 , _snake_case : Union[str, Any]="gelu_fast" , _snake_case : List[str]=0.0 , _snake_case : List[str]=0.0 , _snake_case : str=0.02 , _snake_case : Optional[Any]=1E-06 , _snake_case : List[str]=True , **_snake_case : Tuple , ):
"""simple docstring"""
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = layer_norm_eps
A__ = image_size
A__ = num_frames
A__ = tubelet_size
A__ = num_channels
A__ = qkv_bias
super().__init__(**__UpperCamelCase )
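# Worked example (illustrative): with the defaults above, a 32-frame 224x224 clip
# is split into (32 / 2) * (224 / 16) * (224 / 16) = 16 * 14 * 14 = 3136 tubelet
# tokens, since tubelet_size = [2, 16, 16] spans 2 frames and a 16x16 spatial patch
# (plus one [CLS] token in the Transformers implementation).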
| 702
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def A ( __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
return (preds == labels).mean()
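# Illustrative: for numpy arrays preds = [0, 1, 1, 0] and labels = [0, 1, 0, 0],
# (preds == labels).mean() -> 0.75, i.e. plain element-wise accuracy.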
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
A__ : Optional[str] = field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __lowerCAmelCase :
"""simple docstring"""
A__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
A__ : str = field(metadata={"help": "Should contain the data files for the task."} )
A__ : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A__ : bool = field(
default=UpperCAmelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def A ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __UpperCamelCase )
# Set seed
set_seed(training_args.seed )
try:
A__ = processors[data_args.task_name]()
A__ = processor.get_labels()
A__ = len(__UpperCamelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__UpperCamelCase ) -> Dict:
A__ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(__UpperCamelCase , p.label_ids )}
# Data collator
A__ = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
A__ = Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A__ = trainer.evaluate()
A__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(__UpperCamelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , __UpperCamelCase , __UpperCamelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(__UpperCamelCase )
return results
def A ( __UpperCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 52
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 703
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
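# Rough sketch of the lazy-import idea used above (illustrative, simplified from
# transformers' _LazyModule; not part of this file):
#
#     import importlib
#
#     class LazyModule:
#         def __init__(self, name, import_structure):
#             self._name = name
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             # heavy deps (e.g. torch) are only imported on first attribute access
#             submodule = importlib.import_module('.' + self._attr_to_module[attr], self._name)
#             return getattr(submodule, attr)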
| 52
| 0
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Any , _snake_case : int , ):
"""simple docstring"""
A__ = parent
A__ = 13
A__ = 7
A__ = True
A__ = True
A__ = False
A__ = True
A__ = 99
A__ = 32
A__ = 2
A__ = 4
A__ = 37
A__ = """gelu"""
A__ = 0.1
A__ = 0.1
A__ = 5_12
A__ = 16
A__ = 2
A__ = 0.02
A__ = 3
A__ = 4
A__ = None
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : str , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Any , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Dict ):
"""simple docstring"""
A__ = TFDistilBertModel(config=lowercase__ )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(lowercase__ )
A__ = [input_ids, input_mask]
A__ = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[Any] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = TFDistilBertForMaskedLM(config=lowercase__ )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : str , _snake_case : int , _snake_case : Dict , _snake_case : Any ):
"""simple docstring"""
A__ = TFDistilBertForQuestionAnswering(config=lowercase__ )
A__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
A__ = model(lowercase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : List[Any] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : int , _snake_case : Optional[int] , _snake_case : str ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFDistilBertForSequenceClassification(lowercase__ )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : int , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Any , _snake_case : int ):
"""simple docstring"""
A__ = self.num_choices
A__ = TFDistilBertForMultipleChoice(lowercase__ )
A__ = tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.num_choices, 1) )
A__ = tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.num_choices, 1) )
A__ = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
A__ = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : List[str] , _snake_case : List[Any] , _snake_case : Any , _snake_case : Any , _snake_case : int , _snake_case : List[str] , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = self.num_labels
A__ = TFDistilBertForTokenClassification(lowercase__ )
A__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
A__ = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Any ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config , input_ids , input_mask , sequence_labels , token_labels , choice_labels) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
A__ : Dict = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ : Tuple = False
A__ : int = False
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = TFDistilBertModelTester(self )
A__ = ConfigTester(self , config_class=lowercase__ , dim=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase__ )
def _a ( self : str ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase__ )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase__ )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase__ )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase__ )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase__ )
@slow
def _a ( self : List[str] ):
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
A__ = TFDistilBertModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
A__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
A__ = model(lowercase__ )[0]
A__ = [1, 6, 7_68]
self.assertEqual(output.shape , lowercase__ )
A__ = tf.constant(
[
[
[0.1926_1885, -0.1373_2955, 0.411_9799],
[0.2215_0156, -0.0742_2661, 0.3903_7204],
[0.2275_6018, -0.089_6414, 0.370_1467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-4 )
| 704
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
SCREAMING_SNAKE_CASE__ = f'https://www.google.com/search?q={query}&num=100'
SCREAMING_SNAKE_CASE__ = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE__ = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
SCREAMING_SNAKE_CASE__ = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 52
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
A__ : str = "naver-clova-ix/donut-base-finetuned-docvqa"
A__ : Union[str, Any] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
A__ : List[Any] = "document_qa"
A__ : Tuple = AutoProcessor
A__ : List[str] = VisionEncoderDecoderModel
A__ : str = ["image", "text"]
A__ : List[str] = ["text"]
def __init__( self : Union[str, Any] , *_snake_case : Tuple , **_snake_case : int ):
"""simple docstring"""
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*__snake_case , **__snake_case )
def _a ( self : Dict , _snake_case : "Image" , _snake_case : str ):
"""simple docstring"""
A__ = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
A__ = task_prompt.replace('{user_input}' , __snake_case )
A__ = self.pre_processor.tokenizer(
__snake_case , add_special_tokens=__snake_case , return_tensors='pt' ).input_ids
A__ = self.pre_processor(__snake_case , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _a ( self : Optional[int] , _snake_case : Tuple ):
"""simple docstring"""
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__snake_case , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__snake_case , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__snake_case , ).sequences
def _a ( self : Dict , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = self.pre_processor.batch_decode(__snake_case )[0]
A__ = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
A__ = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
A__ = re.sub(R'<.*?>' , '' , __snake_case , count=1 ).strip() # remove first task start token
A__ = self.pre_processor.tokenajson(__snake_case )
return sequence["answer"]
| 705
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Any = IFInpaintingPipeline
A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
def _a ( self : Any ):
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
A__ = torch.manual_seed(_snake_case )
else:
A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self : Optional[int] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
self._test_save_load_local()
def _a ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 52
| 0
|
'''simple docstring'''
def print_max_activities(start , finish ) -> None:
    n = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    i = 0
    print(i , end=',' )
    # Consider rest of the activities
    for j in range(1 , n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
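    # Expected behaviour for the data above (illustrative; the greedy rule assumes
    # the activities are already sorted by finish time): the run prints "0,1,3,4,"
    # because start[1]=3 >= finish[0]=2, start[3]=5 >= finish[1]=4 and
    # start[4]=8 >= finish[3]=7.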
| 706
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
else:
A__ = processor(_snake_case , _snake_case , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : float ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores / self.temperature
return scores
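# Worked example (illustrative): with logits [2.0, 1.0] and temperature 0.5 the
# warped logits are [4.0, 2.0]; softmax mass on the top token rises from ~0.73 to
# ~0.88, i.e. temperatures below 1 sharpen the distribution and above 1 flatten it.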
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] )
A__ = jnp.full_like(_snake_case , self.filter_value )
A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(_snake_case )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
A__ = jnp.where(_snake_case , _snake_case , _snake_case )
A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
return next_scores
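# Worked example (illustrative): for sorted probabilities [0.5, 0.3, 0.15, 0.05]
# and top_p=0.9, cumsum = [0.5, 0.8, 0.95, 1.0] and (cumsum < 0.9) = [T, T, F, F];
# rolling right by one and forcing position 0 gives [T, T, T, F], so the token that
# pushes the cumulative mass past top_p is kept as well.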
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_snake_case , _snake_case )
A__ = filter_value
def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(_snake_case , _snake_case )
A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case )
A__ = next_scores_flat.reshape(_snake_case , _snake_case )
return next_scores
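# Worked example (illustrative): with batch_size=2, vocab_size=5, top_k=2 the shift
# vector is [0, 0, 5, 5]; adding it to the flattened per-row top-k indices yields
# positions into the flat (batch_size * vocab_size) array, so one scatter restores
# exactly the kept logits while everything else stays at filter_value.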
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int ):
"""simple docstring"""
A__ = bos_token_id
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int , _snake_case : int ):
"""simple docstring"""
A__ = max_length
A__ = eos_token_id
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case )
return scores
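# Illustrative: jnp.clip(cur_len - min_length, 0, 1) is 0 for cur_len <= min_length
# and 1 afterwards, so apply_penalty = 1 - clip(...) masks the EOS logit (-inf)
# exactly until the sequence is longer than min_length.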
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = list(_snake_case )
A__ = begin_index
def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case )
return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that suppresses a list of tokens at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            # Give the forced token a score of 0 and every other token -inf.
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced.
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    r"""[`FlaxLogitsProcessor`] that modifies the logits for the generation of timestamps in Whisper."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|>, which is handled by `without_timestamps`
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),  # has to be a non-timestamp token
                    scores_k.at[: self.eos_token_id].set(-float("inf")),  # cannot be a normal text token
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
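# Illustrative usage sketch (not part of the original module; assumes the classes above
# and that `jax`/`jnp` are imported at the top of this file, as their use here implies):
if __name__ == "__main__":
    import jax

    rng = jax.random.PRNGKey(0)
    dummy_scores = jax.random.normal(rng, (2, 10))  # (batch_size, vocab_size)
    warper = FlaxTopKLogitsWarper(top_k=3)
    warped = warper(jnp.zeros((2, 1), dtype=jnp.int32), dummy_scores, cur_len=1)
    # All but the 3 highest scores per row are now the filter value (-inf by default).
    print(warped)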
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration class to store the configuration of a ConvNeXt V2 model."""

    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
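# Usage sketch (illustrative, not in the original file): instantiating the config
# with its defaults and inspecting the derived stage names.
if __name__ == "__main__":
    config = ConvNextV2Config()
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']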
import argparse
import struct
import unittest
class SHA256:
    """Computes the SHA-256 digest of `data` and exposes it as the `hash` attribute."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with 0x80, zeros, then the original bit-length as a 64-bit big-endian integer
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Split the padded message into 512-bit (64-byte) blocks
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x100000000,
                    e,
                    f,
                    g,
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit integer by `rotations` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    """Compares the pure-Python implementation against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
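# Quick sanity check (sketch, not part of the original script): the pure-Python
# digest should agree with hashlib for any input, e.g.
#
#   >>> import hashlib
#   >>> SHA256(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
#   True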
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
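# Shape sketch (illustrative, not in the original file): the temporal transformer
# mixes information across frames while preserving the (batch * frames, C, H, W) layout.
#
#   model = TransformerTemporalModel(in_channels=32, norm_num_groups=8)
#   x = torch.randn(2 * 4, 32, 8, 8)      # batch_size=2, num_frames=4
#   out = model(x, num_frames=4).sample   # same shape as x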
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight so the network output approaches expected / 100."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Wraps two `Transformer2DModel`s whose outputs are mixed during inference."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
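# Usage note (sketch): `mix_ratio`, `condition_lengths` and
# `transformer_index_for_condition` are meant to be overridden by the pipeline, e.g.
#
#   dual = DualTransformer2DModel(num_attention_heads=2, attention_head_dim=8, in_channels=16, norm_num_groups=4)
#   dual.mix_ratio = 0.7  # weight the first transformer's contribution more heavily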
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task", default="", type=str, required=True, help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
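# Usage sketch (illustrative): the ONNX config declares the dynamic axes used at export
# time, e.g.
#
#   onnx_config = RobertaOnnxConfig(RobertaConfig())
#   onnx_config.inputs  # OrderedDict with 'input_ids' and 'attention_mask'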
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image using the mean pixel value as threshold."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
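# Usage sketch (illustrative): the collection hook above auto-marks unmarked tests as
# "unit", so only explicitly-marked tests run under `pytest -m integration`, e.g.
#
#   @pytest.mark.integration
#   def test_something_against_the_hub():
#       ...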
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from the ONNX model at `onnx_file_path`
    and saves an `optimized_` copy next to it, returning the new path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
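# Usage sketch (illustrative; the path is hypothetical):
#
#   optimized_path = remove_dup_initializers("exported/model.onnx")
#   # -> writes "exported/optimized_model.onnx" and returns its path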
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
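# Example invocation (sketch; the model and dataset IDs are illustrative):
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test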
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
if "backbone" in name:
A__ = name.replace('backbone' , 'vit' )
if "cls_token" in name:
A__ = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
A__ = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
A__ = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
A__ = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
A__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
A__ = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
A__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A__ = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
A__ = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
A__ = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
A__ = name.replace('vit.norm' , 'vit.layernorm' )
return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
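# Example invocation (the script filename and checkpoint path below are hypothetical):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small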
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ['model.decoder.embed_positions.weights']
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace('emb', 'model.decoder.embed_tokens')
    if "transformer" in name:
        name = name.replace('transformer', 'model.decoder')
    if "cross_attention" in name:
        name = name.replace('cross_attention', 'encoder_attn')
    if "linear1" in name:
        name = name.replace('linear1', 'fc1')
    if "linear2" in name:
        name = name.replace('linear2', 'fc2')
    if "norm1" in name:
        name = name.replace('norm1', 'self_attn_layer_norm')
    if "norm_cross" in name:
        name = name.replace('norm_cross', 'encoder_attn_layer_norm')
    if "norm2" in name:
        name = name.replace('norm2', 'final_layer_norm')
    if "out_norm" in name:
        name = name.replace('out_norm', 'model.decoder.layer_norm')
    if "linears" in name:
        name = name.replace('linears', 'lm_heads')
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj', 'enc_to_dec_proj')
    return name
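# e.g. rename_keys maps "transformer.layers.0.linear1.weight" -> "model.decoder.layers.0.fc1.weight"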
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('in_proj_weight', 'q_proj.weight')] = val[:hidden_size, :]
            state_dict[key.replace('in_proj_weight', 'k_proj.weight')] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('in_proj_weight', 'v_proj.weight')] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('enc_to_dec_proj.') :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
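# The enc-dec projection weights are returned separately on purpose: they belong to the
# composite MusicgenForConditionalGeneration model, not to the decoder, and are loaded below.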
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f'''Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.''')
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = T5EncoderModel.from_pretrained('t5-base')
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz')
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder')) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''')
    if len(unexpected_keys) > 0:
        raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''')
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('Incorrect shape for logits')
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base')
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz', padding_side='left')
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''')
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f'''Pushing model {checkpoint} to {repo_id}''')
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
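# Example invocation (the script filename below is hypothetical):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small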
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
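# At runtime the module object is swapped for a _LazyModule, so each submodule is only
# imported on first attribute access; the TYPE_CHECKING branch keeps static analysis intact.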
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """Automatic mask generation pipeline: predicts binary masks for an image from a grid of points."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, 'vision')
        requires_backends(self, 'torch')
        if self.framework != "pt":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['points_per_batch'] = kwargs['points_per_batch']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['points_per_crop'] = kwargs['points_per_crop']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['crops_n_layers'] = kwargs['crops_n_layers']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['crop_overlap_ratio'] = kwargs['crop_overlap_ratio']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['crop_n_points_downscale_factor'] = kwargs['crop_n_points_downscale_factor']
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params['pred_iou_thresh'] = kwargs['pred_iou_thresh']
        if "stability_score_offset" in kwargs:
            forward_params['stability_score_offset'] = kwargs['stability_score_offset']
        if "mask_threshold" in kwargs:
            forward_params['mask_threshold'] = kwargs['mask_threshold']
        if "stability_score_thresh" in kwargs:
            forward_params['stability_score_thresh'] = kwargs['stability_score_thresh']
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['crops_nms_thresh'] = kwargs['crops_nms_thresh']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['output_rle_mask'] = kwargs['output_rle_mask']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['output_bboxes_mask'] = kwargs['output_bboxes_mask']
        return preprocess_kwargs, forward_params, postprocess_kwargs
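    # The three dicts above are routed by the base pipeline to preprocess, _forward
    # and postprocess respectively.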
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1, ):
        image = load_image(image)
        target_size = self.image_processor.size['longest_edge']
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors='pt')
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('pixel_values'))
                    model_inputs['image_embeddings'] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                'Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '
                'To return all points at once, set points_per_batch to None')
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1, ):
        input_boxes = model_inputs.pop('input_boxes')
        is_last = model_inputs.pop('is_last')
        original_sizes = model_inputs.pop('original_sizes').tolist()
        reshaped_input_sizes = model_inputs.pop('reshaped_input_sizes').tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['pred_masks']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs['iou_scores']
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('iou_scores'))
            all_masks.extend(model_output.pop('masks'))
            all_boxes.append(model_output.pop('boxes'))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
    'tokenizer_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}
SPIECE_UNDERLINE = '▁'
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer, backed by the 🤗 tokenizers library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
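    # i.e. single sequence: [CLS] X [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]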
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ), )
    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ), )
    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['height'],
                self.image_proc_tester.size['width'],
            ), )
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = 'sshleifer/bart-tiny-random'
TINY_T5 = 'patrickvonplaten/t5-tiny-random'
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)
    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)
    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)
    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)
    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)
    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
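# Note: e / d above are the number of encoder / decoder layers copied into the student;
# passing None for one of them keeps the corresponding depth of the teacher model.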
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    """Wraps an image processor and the MGP-STR char/bpe/wordpiece tokenizers into a single processor."""
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        super().__init__(image_processor, tokenizer)
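    # MGP-STR predicts with three heads (character, BPE, wordpiece); batch_decode below keeps,
    # per sample, the hypothesis with the highest cumulative confidence across the three.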
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(f'''Format {format} is not supported.''')
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs
    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)
    def wp_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Combines a BridgeTower image processor and a RoBERTa tokenizer into a single processor."""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
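    # The returned BatchEncoding carries both the text fields (input_ids, attention_mask, ...)
    # and the image fields (pixel_values, pixel_mask) expected by the two-tower model.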
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize_and_center_crop=True, size=None, crop_pct=0.9, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {'shortest_edge': 30}
        crop_size = crop_size if crop_size is not None else {'height': 30, 'width': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize_and_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'crop_pct'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 30})
        self.assertEqual(image_processor.crop_size, {'height': 30, 'width': 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta'''] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xlm_roberta_fast'''] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xlm_roberta'''] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xlm_roberta'''] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xlm_roberta'''] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig(PretrainedConfig):
    """Configuration class for Open-Llama models."""
    model_type = 'open-llama'
    def __init__(self, vocab_size=100000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the kwarg name below keeps the historical upstream spelling ("memorry")
        self.use_memory_efficient_attention = kwargs.pop(
            'use_memorry_efficient_attention', use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'''got {self.rope_scaling}''')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}''')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}''')
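# e.g. rope_scaling={"type": "linear", "factor": 2.0} passes the validation above.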
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
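# i.e. a prediction gets full exact-match credit if it matches any of the gold answers.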
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep='\t', header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, 'r').readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f'''F1: {f1:.2f}''')
    logger.info(f'''EM: {em:.2f}''')
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    references = [line.strip() for line in open(gold_data_path, 'r').readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split('\t')[:k])
        ref_provenance = set(reference.split('\t'))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(f'''Precision@{k}: {em: .2f}''')
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors='pt', padding=True, truncation=True, )['input_ids'].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors='pt', )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs['title']]
        provenance_strings.append('\t'.join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors='pt', padding=True, truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info('Q: {} - A: {}'.format(q, a))
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type', choices=['rag_sequence', 'rag_token', 'bart'], type=str, help=(
            'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
            ' model_name_or_path'
        ), )
    parser.add_argument(
        '--index_name', default=None, choices=['exact', 'compressed', 'legacy'], type=str, help='RAG model retriever type', )
    parser.add_argument(
        '--index_path', default=None, type=str, help='Path to the retrieval index', )
    parser.add_argument('--n_docs', default=5, type=int, help='Number of retrieved docs')
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained checkpoints or model identifier from huggingface.co/models', )
    parser.add_argument(
        '--eval_mode', choices=['e2e', 'retrieval'], default='e2e', type=str, help=(
            'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
            ' precision@k.'
        ), )
    parser.add_argument('--k', default=1, type=int, help='k for the precision@k calculation')
    parser.add_argument(
        '--evaluation_set', default=None, type=str, required=True, help='Path to a file containing evaluation samples', )
    parser.add_argument(
        '--gold_data_path', default=None, type=str, required=True, help='Path to a tab-separated file with gold samples', )
    parser.add_argument(
        '--gold_data_mode', default='qa', type=str, choices=['qa', 'ans'], help=(
            'Format of the gold data file. '
            'qa - a single line in the following format: question [tab] answer_list. '
            'ans - a single line of the gold file contains the expected answer string'
        ), )
    parser.add_argument(
        '--predictions_path', type=str, default='predictions.txt', help='Name of the predictions file, to be stored in the checkpoints directory', )
    parser.add_argument(
        '--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number', )
    parser.add_argument(
        '--eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.', )
    parser.add_argument(
        '--recalculate', help='Recalculate predictions even if the prediction file exists', action='store_true', )
    parser.add_argument(
        '--num_beams', default=4, type=int, help='Number of beams to be used when generating answers', )
    parser.add_argument('--min_length', default=1, type=int, help='Min length of the generated answers')
    parser.add_argument('--max_length', default=50, type=int, help='Max length of the generated answers')
    parser.add_argument(
        '--print_predictions', action='store_true', help='If True, prints predictions while evaluating.', )
    parser.add_argument(
        '--print_docs', action='store_true', help='If True, prints docs retrieved while generating.', )
    args = parser.parse_args()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith('rag'):
        model_class = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
        model_kwargs['n_docs'] = args.n_docs
        if args.index_name is not None:
            model_kwargs['index_name'] = args.index_name
        if args.index_path is not None:
            model_kwargs['index_path'] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('Evaluate the following checkpoints: %s', checkpoints)
    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == 'e2e' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info('***** Running evaluation for {} *****'.format(checkpoint))
        logger.info('  Batch size = %d', args.eval_batch_size)
        logger.info('  Predictions will be stored under {}'.format(args.predictions_path))
        if args.model_type.startswith('rag'):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, 'r') as eval_file, open(args.predictions_path, 'w') as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write('\n'.join(answers) + '\n')
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write('\n'.join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
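# Example invocation (file paths are hypothetical):
#   python eval_rag.py --model_name_or_path facebook/rag-token-nq \
#       --evaluation_set questions.txt --gold_data_path gold.tsv --predictions_path preds.txt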
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    """Configuration class for UperNet semantic segmentation models."""
    model_type = 'upernet'
    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : Any , _snake_case : Optional[int]=13 , _snake_case : Optional[Any]=64 , _snake_case : List[str]=2 , _snake_case : Any=3 , _snake_case : Union[str, Any]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : int=5 , _snake_case : Union[str, Any]=4 , _snake_case : int=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Dict=0.1 , _snake_case : List[str]=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=[1, 16, 4, 4] , _snake_case : Dict=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
A__ = (self.image_size // 32) ** 2
A__ = num_patches + 1
def _a ( self : Any ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def _a ( self : Tuple ):
"""simple docstring"""
A__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_snake_case , )
def _a ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = ViTHybridModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self : int ):
"""simple docstring"""
pass
def _a ( self : int ):
"""simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def _a ( self : List[str] ):
"""simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def _a ( self : Any ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _a ( self : str ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def _a ( self : Any ):
"""simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _a ( self : int ):
"""simple docstring"""
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
"""simple docstring"""
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self : List[Any] ):
"""simple docstring"""
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( BaseImageProcessor ):
"""simple docstring"""
A__ : Optional[Any] = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 2_55 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 2_24}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='size' , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
if not valid_images(_snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
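# Processing order applied to every image above: optional RGB conversion ->
# numpy array -> resize (shortest edge) -> center crop -> rescale by 1/255 ->
# normalize with the CLIP mean/std -> channel-first dimension format.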
def A ( number ) -> bool:
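    """
    Return True when ``number`` is even (a minimal doctest, added so the
    ``doctest.testmod()`` call below actually checks something).

    >>> A(4)
    True
    >>> A(7)
    False
    """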
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
def depth_first_search( possible_board , diagonal_right_collisions , diagonal_left_collisions , boards , n , ) -> None:
    row = len(possible_board )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) the column value does not already appear, because a repeat
        # would mean a vertical collision. Then we apply the two formulas we learned
        # before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
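        # Worked example: queens at (row=0, col=1) and (row=2, col=3) share a 45º
        # diagonal, since 0 - 1 == 2 - 3 == -1 is already in diagonal_right_collisions.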
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution(n ) -> None:
    boards = []
    depth_first_search([] , [] , [] , boards , n )
# Print all the boards
for board in boards:
for column in board:
            print(column )
print('' )
    print(len(boards ) , 'solutions were found.' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
from typing import Dict
from .base import GenericTensor, Pipeline
class __lowerCAmelCase ( Pipeline ):
"""simple docstring"""
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self , inputs , **tokenize_kwargs ):
        """simple docstring"""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , return_tensors=False ):
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        return super().__call__(*args , **kwargs )
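# The call flow mirrors the standard Pipeline hooks above: _sanitize_parameters
# splits the kwargs, preprocess tokenizes the input text, _forward runs the
# model, and postprocess returns the first output tensor (converted to nested
# Python lists unless return_tensors is set).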
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
def _a ( self : Optional[Any] ):
"""simple docstring"""
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def _a ( self : Optional[Any] ):
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 10_11_22 )
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def _a ( self : List[str] ):
"""simple docstring"""
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 30_18, 7_03_07, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )
def _a ( self : str ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def _a ( self : Any ):
"""simple docstring"""
A__ = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
A__ = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_snake_case , )
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds , labels ) -> float:
    return (preds == labels).mean()
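# e.g. simple_accuracy(np.array([0, 1, 1]), np.array([0, 0, 1])) returns 2 / 3.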
@dataclass
class ModelArguments:
"""simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments:
"""simple docstring"""
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main() -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
return results
def _mp_fn(index ) -> None:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    parser = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
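# Assuming this is the package __init__ for the MRA model, the lazy module makes
# `from ... import MraModel` defer the torch-dependent import until the attribute
# is actually accessed for the first time.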
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __lowerCAmelCase ( TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = 'pt'
        self.framework_tf = 'tf'
    def _setup_pt_ckpt( self , model_path ):
        """simple docstring"""
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ):
        """simple docstring"""
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
def _a ( self : Dict ):
"""simple docstring"""
        mock_framework = 'mock_framework'
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
def _a ( self : Optional[int] ):
"""simple docstring"""
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
        mock_tf_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    url = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
    webbrowser.open(link)
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))
def compare_versions(library_or_version , operation , requirement_version ) -> bool:
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version(operation , version ) -> bool:
    return compare_versions(torch_version , operation , version )
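# Usage sketch: is_torch_version('>=', '1.12') compares the parsed torch version
# cached above against the requirement using the matching operator function.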
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ):
        """simple docstring"""
        return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self : Optional[int] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
self._test_save_load_local()
def _a ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
'''simple docstring'''
ENERGY_CONVERSION = {
    '''joule''': 1.0,
    '''kilojoule''': 1_000,
    '''megajoule''': 1_000_000,
    '''gigajoule''': 1_000_000_000,
    '''wattsecond''': 1.0,
    '''watthour''': 3_600,
    '''kilowatthour''': 3_600_000,
    '''newtonmeter''': 1.0,
    '''calorie_nutr''': 4_186.8,
    '''kilocalorie_nutr''': 4_186_800.00,
    '''electronvolt''': 1.602_176_634e-19,
    '''britishthermalunit_it''': 1_055.055_85,
    '''footpound''': 1.355_818,
}
def A ( from_type , to_type , value ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            f'''Valid values are: {", ".join(ENERGY_CONVERSION )}'''
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
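# Quick check against the table above: A('joule', 'kilojoule', 1_000) gives
# 1_000 * 1.0 / 1_000 == 1.0, i.e. 1000 joules is one kilojoule.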
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
    scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
        Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
        beam search, or log softmax for each vocabulary token when using beam search.
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( list ):
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
else:
A__ = processor(_snake_case , _snake_case , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
    def __init__( self , temperature : float ):
        """simple docstring"""
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
        self.temperature = temperature
    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        """simple docstring"""
        scores = scores / self.temperature
        return scores
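# For example, temperature=0.5 doubles every logit (a sharper distribution),
# while temperature=2.0 halves them (a flatter, more random sampling).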
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] )
A__ = jnp.full_like(_snake_case , self.filter_value )
A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(_snake_case )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
A__ = jnp.where(_snake_case , _snake_case , _snake_case )
A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
return next_scores
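# Nucleus (top-p) filtering: keep the smallest set of highest-probability tokens
# whose cumulative probability exceeds `top_p`, mask the rest to `filter_value`,
# then scatter the kept scores back into their original vocabulary positions.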
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_snake_case , _snake_case )
A__ = filter_value
def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(_snake_case , _snake_case )
A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case )
A__ = next_scores_flat.reshape(_snake_case , _snake_case )
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int ):
"""simple docstring"""
A__ = bos_token_id
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int , _snake_case : int ):
"""simple docstring"""
A__ = max_length
A__ = eos_token_id
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = list(_snake_case )
A__ = begin_index
def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : list ):
"""simple docstring"""
A__ = list(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = dict(_snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_snake_case )
A__ = jnp.intaa(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
def _force_token(_snake_case : Dict ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_snake_case , 'max_initial_timestamp_index' ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ):
"""simple docstring"""
A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(_snake_case : Dict , _snake_case : str ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , )
return jnp.where(
_snake_case , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
_snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(_snake_case , axis=-1 )
def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
return scores
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_focalnet'''] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import argparse
import struct
import unittest
class SHAaaa :
"""simple docstring"""
def __init__( self : List[str] , _snake_case : bytes ):
"""simple docstring"""
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
    @staticmethod
    def preprocessing(data : bytes ):
"""simple docstring"""
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer
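    # Padding example: an 11-byte input gains 1 byte of 0x80, 44 zero bytes and
    # an 8-byte big-endian bit length, filling exactly one 64-byte block.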
    def final_hash( self ):
"""simple docstring"""
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
# add 48 0-ed integers
words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    # s0 and s1 per the SHA-256 message schedule
                    sa = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x100000000
# Compression
                sa = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                sa = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sa + maj) % 0x100000000
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x100000000),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0x100000000),
                )
                mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self , value : int , rotations : int ):
"""simple docstring"""
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
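    # Example: ror(0x00000001, 1) == 0x80000000 -- the low bit wraps around to
    # the most significant position of the 32-bit word.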
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : str ):
"""simple docstring"""
import hashlib
        msg = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(msg ).hash , hashlib.sha256(msg ).hexdigest() )
def main() -> None:
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
SCREAMING_SNAKE_CASE__ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def init_clap(checkpoint_path , enable_fusion=False ) -> tuple:
    model , model_cfg = create_model(
        'HTSAT-tiny' , 'roberta' , checkpoint_path , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=enable_fusion , fusion_type='aff_2d' if enable_fusion else None , )
    return model, model_cfg
def rename_state_dict(state_dict ) -> dict:
    model_state_dict = {}
    sequential_layers_pattern = r'.*sequential.(\d+).*'
    text_projection_pattern = r'.*_projection.(\d+).*'
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key )
        if re.match(sequential_layers_pattern , key ):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern , key ).group(1 )
            key = key.replace(f'''sequential.{sequential_layer}.''' , f'''layers.{int(sequential_layer )//3}.linear.''' )
        elif re.match(text_projection_pattern , key ):
            projection_layer = int(re.match(text_projection_pattern , key ).group(1 ) )
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f'''_projection.{projection_layer}.''' , f'''_projection.linear{transformers_projection_layer}.''' )
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0 ) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace('qkv' , 'query' )] = query_layer
            model_state_dict[key.replace('qkv' , 'key' )] = key_layer
            model_state_dict[key.replace('qkv' , 'value' )] = value_layer
        else:
            model_state_dict[key] = value
return model_state_dict
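# Note: the qkv split above assumes the fused attention weight stacks query, key
# and value along dim 0, so each projection is exactly one third of the rows.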
def convert_clap_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path , enable_fusion=False ) -> None:
    clap_model , model_cfg = init_clap(checkpoint_path , enable_fusion=enable_fusion )
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict )
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config )
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict , strict=False )
    model.save_pretrained(pytorch_dump_folder_path )
    transformers_config.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 708
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative form if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    # random starting weight, as in the original initialisation
    weight = float(2 * (random.randint(1, 100)) - 1)
    layer_1 = 0.0
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (uses the derivative form of the sigmoid)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
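# Hedged note (added): since each step nudges the single weight along the error gradient,
# the returned value approaches `expected` as the number of propagations grows, e.g.
# forward_propagation(32, 450_000) typically lands within 1 of 32.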
| 52
| 0
|
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 709
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='np').input_ids
        labels = tokenizer('Hi I am', return_tensors='np').input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 52
| 0
|
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'{solution() = }')
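# Hedged note (added): each loop iteration jumps to the next solution of the underlying
# Pell-like recurrence, so the convergents grow geometrically and only O(log(min_total))
# iterations run; tracing by hand, solution(100) returns 85 (85 discs out of 120 total).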
| 710
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
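# Hedged usage sketch (added; the constructor call is an assumption about OnnxConfig's API):
#   onnx_config = RobertaOnnxConfig(RobertaConfig(), task='sequence-classification')
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask']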
| 52
| 0
|
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f'{solution() = }')
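# Hedged note (added): the same sum has an O(1) closed form by inclusion-exclusion over
# arithmetic series: S(3) + S(5) - S(15), where S(k) = k * m * (m + 1) // 2 and
# m = (n - 1) // k.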
| 711
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']), sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
| 52
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
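# Hedged usage sketch (added; `HelloCommand` and its wiring are hypothetical):
#
# class HelloCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         sub = parser.add_parser('hello')
#         sub.set_defaults(func=lambda args: HelloCommand())
#
#     def run(self):
#         print('hello')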
| 712
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['integration', 'unit']):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line('markers', 'torchaudio_latest: mark test to run with torchaudio>=0.12')


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / 'cache'
    test_hf_datasets_cache = test_hf_cache_home / 'datasets'
    test_hf_metrics_cache = test_hf_cache_home / 'metrics'
    test_hf_modules_cache = test_hf_cache_home / 'modules'
    monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE', str(test_hf_datasets_cache))
    monkeypatch.setattr('datasets.config.HF_METRICS_CACHE', str(test_hf_metrics_cache))
    monkeypatch.setattr('datasets.config.HF_MODULES_CACHE', str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / 'downloads'
    monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / 'downloads' / 'extracted'
    monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH', str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope='session')
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS', False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING', True)
| 52
| 0
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root: TreeNode | None) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float('inf'), float('inf'))
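# Hedged example (added): a quick three-node check.
#   root = TreeNode(2.0, left=TreeNode(1.0), right=TreeNode(3.0))  # valid BST -> True
#   bad = TreeNode(2.0, left=TreeNode(5.0))                        # 5.0 > 2.0 on the left -> False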
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    result_str = f'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str)
    with open(f'''{dataset_id}_eval_results.txt''', 'w') as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'''log_{dataset_id}_predictions.txt'''
        target_file = f'''log_{dataset_id}_targets.txt'''
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'''{i}''' + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f'''{i}''' + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
| 52
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True, ):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 714
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()
    # size of the architecture
    if 'yolos_ti' in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == 'yolos_s_dWr':
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif 'yolos_s' in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif 'yolos_b' in yolos_name:
        config.image_size = [800, 1_344]
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if 'backbone' in name:
        name = name.replace('backbone', 'vit')
    if 'cls_token' in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if 'det_token' in name:
        name = name.replace('det_token', 'embeddings.detection_tokens')
    if 'mid_pos_embed' in name:
        name = name.replace('mid_pos_embed', 'encoder.mid_position_embeddings')
    if 'pos_embed' in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if 'patch_embed.proj' in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if 'blocks' in name:
        name = name.replace('blocks', 'encoder.layer')
    if 'attn.proj' in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if 'attn' in name:
        name = name.replace('attn', 'attention.self')
    if 'norm1' in name:
        name = name.replace('norm1', 'layernorm_before')
    if 'norm2' in name:
        name = name.replace('norm2', 'layernorm_after')
    if 'mlp.fc1' in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if 'mlp.fc2' in name:
        name = name.replace('mlp.fc2', 'output.dense')
    if 'class_embed' in name:
        name = name.replace('class_embed', 'class_labels_classifier')
    if 'bbox_embed' in name:
        name = name.replace('bbox_embed', 'bbox_predictor')
    if 'vit.norm' in name:
        name = name.replace('vit.norm', 'vit.layernorm')
    return name
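# Hedged illustration (added): rename_key rewrites timm-style names into HF names, e.g.
# 'backbone.blocks.0.attn.proj.weight' -> 'vit.encoder.layer.0.attention.output.dense.weight'.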
def convert_state_dict(orig_state_dict, model) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if 'qkv' in key:
            key_split = key.split('.')
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if 'weight' in key:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.query.bias'''] = val[:dim]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''vit.encoder.layer.{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format='coco_detection', size=size)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == 'yolos_ti':
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == 'yolos_s_200_pre':
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == 'yolos_s_300_pre':
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == 'yolos_s_dWr':
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == 'yolos_base':
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f'''Unknown yolos_name: {yolos_name}''')
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1E-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            'yolos_ti': 'yolos-tiny',
            'yolos_s_200_pre': 'yolos-small',
            'yolos_s_300_pre': 'yolos-small-300',
            'yolos_s_dWr': 'yolos-small-dwr',
            'yolos_base': 'yolos-base',
        }
        print('Pushing to the hub...')
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization='hustvl')
        model.push_to_hub(model_name, organization='hustvl')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 52
| 0
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
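# Hedged usage sketch (added; the paths are hypothetical):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path bert_model.ckpt \
#     --bert_config_file bert_config.json \
#     --pytorch_dump_path pytorch_model.bin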
| 715
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
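# Hedged note (added): with sys.modules patched to a _LazyModule, `import transformers.onnx`
# stays cheap; names listed in _import_structure are resolved only on first attribute access.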
| 52
| 0
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
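# Hedged illustration (added): e.g. get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}.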
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, bos_token='__start__', eos_token='__end__', unk_token='__unk__', pad_token='__null__', **kwargs, ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub('(\')', r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')
        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return ' '.join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
| 716
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}

SPIECE_UNDERLINE = '▁'
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs, ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.')
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 52
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-vision-base-ft': (
        'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = 'data2vec-vision'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
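# Illustrative usage sketch (ours, not part of the original module): the config can be
# instantiated directly with overrides, e.g.
#   config = Data2VecVisionConfig(image_size=384, drop_path_rate=0.2)
#   assert config.model_type == 'data2vec-vision'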
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = 'sshleifer/bart-tiny-random'
TINY_T5 = 'patrickvonplaten/t5-tiny-random'


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_encoder_small_decoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
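# Note (ours, phrasing not from the original authors): `e` and `d` select how many encoder and
# decoder layers the student keeps; d=None appears to keep a full-sized teacher decoder, as the
# third test asserts, and passing both as None is rejected, as the last test checks.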
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace filter kernel
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
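# Usage note (ours, not from the original file): these are plain pytest-style tests; assuming
# the repository layout referenced above, they can be run with e.g.
#   python -m pytest digital_image_processing/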
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a RoBERTa tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
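# Illustrative usage sketch (ours; the checkpoint name is an example, not taken from this file):
#   processor = BridgeTowerProcessor.from_pretrained('BridgeTower/bridgetower-base')
#   encoding = processor(images=image, text='a photo of a car', return_tensors='pt')
#   # encoding now holds input_ids / attention_mask plus pixel_values / pixel_mask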
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
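# Sanity check (a well-known Adler-32 test vector, added by us):
#   adler32('Wikipedia') == 300286872  # 0x11E60398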
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
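# This module follows the usual transformers lazy-import pattern: `_import_structure` maps
# submodules to their public names, optional backends are probed with try/except, and at the
# bottom the module is swapped for a `_LazyModule` so heavy imports only happen on first access.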
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def upper(word: str) -> str:
    """Convert every lowercase ASCII letter in `word` to uppercase."""
    return "".join(chr(ord(char) - 32) if 'a' <= char <= 'z' else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep='\t', header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, 'r').readlines()]
        answers = [[reference] for reference in references]
    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    em = 100.0 * em / total
    f1 = 100.0 * f1 / total
    logger.info(f'F1: {f1:.2f}')
    logger.info(f'EM: {em:.2f}')
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    references = [line.strip() for line in open(gold_data_path, 'r').readlines()]
    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split('\t')[:k])
        ref_provenance = set(reference.split('\t'))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k
    em = 100.0 * em / total
    logger.info(f'Precision@{k}: {em: .2f}')
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors='pt', padding=True, truncation=True)['input_ids'].to(args.device)
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors='pt')
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs['title']]
        provenance_strings.append('\t'.join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors='pt', padding=True, truncation=True)
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]])
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)
        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info('Q: {} - A: {}'.format(q, a))
        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type', choices=['rag_sequence', 'rag_token', 'bart'], type=str, help=(
            'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
            ' model_name_or_path'
        ))
    parser.add_argument(
        '--index_name', default=None, choices=['exact', 'compressed', 'legacy'], type=str, help='RAG model retriever type')
    parser.add_argument(
        '--index_path', default=None, type=str, help='Path to the retrieval index')
    parser.add_argument('--n_docs', default=5, type=int, help='Number of retrieved docs')
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained checkpoints or model identifier from huggingface.co/models')
    parser.add_argument(
        '--eval_mode', choices=['e2e', 'retrieval'], default='e2e', type=str, help=(
            'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
            ' precision@k.'
        ))
    parser.add_argument('--k', default=1, type=int, help='k for the precision@k calculation')
    parser.add_argument(
        '--evaluation_set', default=None, type=str, required=True, help='Path to a file containing evaluation samples')
    parser.add_argument(
        '--gold_data_path', default=None, type=str, required=True, help='Path to a tab-separated file with gold samples')
    parser.add_argument(
        '--gold_data_mode', default='qa', type=str, choices=['qa', 'ans'], help=(
            'Format of the gold data file'
            'qa - a single line in the following format: question [tab] answer_list'
            'ans - a single line of the gold file contains the expected answer string'
        ))
    parser.add_argument(
        '--predictions_path', type=str, default='predictions.txt', help='Name of the predictions file, to be stored in the checkpoints directory')
    parser.add_argument(
        '--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number')
    parser.add_argument(
        '--eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
    parser.add_argument(
        '--recalculate', help='Recalculate predictions even if the prediction file exists', action='store_true')
    parser.add_argument(
        '--num_beams', default=4, type=int, help='Number of beams to be used when generating answers')
    parser.add_argument('--min_length', default=1, type=int, help='Min length of the generated answers')
    parser.add_argument('--max_length', default=50, type=int, help='Max length of the generated answers')
    parser.add_argument(
        '--print_predictions', action='store_true', help='If True, prints predictions while evaluating.')
    parser.add_argument(
        '--print_docs', action='store_true', help='If True, prints docs retried while generating.')
    args = parser.parse_args()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith('rag'):
        model_class = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
        model_kwargs['n_docs'] = args.n_docs
        if args.index_name is not None:
            model_kwargs['index_name'] = args.index_name
        if args.index_path is not None:
            model_kwargs['index_path'] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('Evaluate the following checkpoints: %s', checkpoints)
    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == 'e2e' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info('***** Running evaluation for {} *****'.format(checkpoint))
        logger.info('  Batch size = %d', args.eval_batch_size)
        logger.info('  Predictions will be stored under {}'.format(args.predictions_path))
        if args.model_type.startswith('rag'):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, 'r') as eval_file, open(args.predictions_path, 'w') as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write('\n'.join(answers) + '\n')
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write('\n'.join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
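# Example invocation (illustrative checkpoint and paths, not from this file):
#   python eval_rag.py --model_name_or_path facebook/rag-sequence-nq \
#       --evaluation_set data/test.source --gold_data_path data/gold.tsv \
#       --predictions_path preds.txt --eval_mode e2e --gold_data_mode qa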
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
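# Context note (ours): this file solves Project Euler 107: build a weighted graph from an
# adjacency matrix, find a minimum spanning tree with Prim's algorithm, and report how much
# total edge weight the spanning tree saves over the full network.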
class Graph:
    """An undirected weighted graph, with edges normalized to (smaller, larger) vertex order."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int):
        """Add a new edge, registering both endpoints as vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        """Run Prim's algorithm and return a minimum spanning tree of this graph."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data: list[str] = f.read().strip().split('\n')
    adjacency_matrix = [line.split(',') for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'{solution() = }')
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [4, 8, 16, 32],
            'num_groups': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config)

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized')

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384', device_map='auto')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
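# Note (ours): the @slow integration tests above are skipped by default; in the transformers
# test suite they are typically enabled with the RUN_SLOW=1 environment variable, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/vit_hybrid/test_modeling_vit_hybrid.py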
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, 'run_glue_deebert.py')
            with patch.object(sys, 'argv', args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(train_args)
        eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(eval_args)
        entropy_eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(entropy_eval_args)
def is_even(number: int) -> bool:
    """Return True if `number` is even, using a bitwise check of the lowest bit."""
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the raw hidden states of the model for a given text input."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)')
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (e.g. last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
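# Illustrative usage sketch (ours; the checkpoint is an example, not taken from this file):
#   from transformers import pipeline
#   extractor = pipeline('feature-extraction', model='distilbert-base-uncased')
#   features = extractor('This is a test')  # nested list shaped [1, num_tokens, hidden_size]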
from __future__ import annotations
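# Context note (ours): this implements the *bad character heuristic* of Boyer-Moore string
# search: on a mismatch, the pattern is shifted so its rightmost occurrence of the mismatched
# text character lines up with that character, skipping alignments that cannot match.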
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
SCREAMING_SNAKE_CASE__ = "ABAABA"
SCREAMING_SNAKE_CASE__ = "AB"
SCREAMING_SNAKE_CASE__ = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE__ = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
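# Expected output (ours): for text 'ABAABA' and pattern 'AB' the pattern matches at
# indices 0 and 3, so this prints: Pattern found in following positions: [0, 3]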
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"})


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        })
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name))
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir)
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev)
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
                results.update(result)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
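# Example invocation (illustrative; data paths depend on your setup):
#   python run_multiple_choice.py --model_name_or_path bert-base-uncased --task_name swag \
#       --data_dir ./data/swag --output_dir ./out --do_train --do_eval --max_seq_length 128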
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTYPES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    """Image feature to read image data from an image file, raw bytes, array, or PIL image."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.')
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('path') is not None and os.path.isfile(value['path']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path')}
        elif value.get('bytes') is not None or value.get('path') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes'), "path": value.get('path')}
        else:
            raise ValueError(
                f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.')
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.')
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.')
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::')[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value('binary'),
                "path": Value('string'),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes') >= 0:
                bytes_array = storage.field('bytes')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                path_array = storage.field('path')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['bytes'] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary())
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary())
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()], type=pa.string())
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes, using the image's native format if possible, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, 'filename') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.')
        if dtype is not dest_dtype:
            warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'')
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTYPES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTYPES:
                warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'')
                break
            else:
                dest_dtype = None
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTYPES}')
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs) -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')
    if objs:
        # first_non_null_value returns an (index, value) pair
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
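# Hedged usage sketch (added; not part of the original module). Assumes this
# module's own imports (numpy, Pillow via config.PIL_AVAILABLE) resolve; the
# 64x64 RGB array is an arbitrary illustrative input.
if __name__ == "__main__":
    _rgb = np.zeros((64, 64, 3), dtype=np.uint8)
    _encoded = encode_np_array(_rgb)
    # A multi-channel uint8 array is serialized to image bytes with no path.
    assert _encoded["path"] is None and isinstance(_encoded["bytes"], bytes)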
| 703
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mra'''] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
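# Hedged note (added): with the lazy module installed above, a statement like
#     from <this package> import MraConfig
# only imports configuration_mra on first attribute access, so nothing heavy
# (e.g. torch-backed modeling code) loads at package import time. The package
# path is an assumption, since this file's location is not shown here.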
| 52
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class __lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=26_77_35 , cutoffs=[2_00_00, 4_00_00, 20_00_00] , d_model=10_24 , d_embed=10_24 , n_head=16 , d_head=64 , d_inner=40_96 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=16_00 , clamp_len=10_00 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1E-5 , eos_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        """simple docstring"""
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        """simple docstring"""
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
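# Hedged usage sketch (added): constructing the config with a non-default
# memory length; the upstream public name for the anonymized class above is
# TransfoXLConfig. Because the model has no hard sequence-length limit,
# max_position_embeddings is read-only and reports -1:
#
#   config = TransfoXLConfig(mem_len=8_00)
#   assert config.max_position_embeddings == -1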
| 704
|
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
    url = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
        link = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
        link = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
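    # Hedged aside (added): the '%20' join above is just manual URL encoding of
    # spaces; urllib does the same, e.g. quote("neural nets") == 'neural%20nets'.
    # The parse_qs fallback branch assumes Google's consent/redirect markup,
    # which can change at any time.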
| 52
| 0
|
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation='''relu''')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(3_2, (3, 3), activation='''relu'''))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
    # Step 3 - Flattening
    classifier.add(layers.Flatten())
    # Step 4 - Full connection
    classifier.add(layers.Dense(units=1_2_8, activation='''relu'''))
    classifier.add(layers.Dense(units=1, activation='''sigmoid'''))
    # Compiling the CNN
    classifier.compile(
        optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy''']
    )
    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
    training_set = train_datagen.flow_from_directory(
        '''dataset/training_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
    )
    test_set = test_datagen.flow_from_directory(
        '''dataset/test_set''', target_size=(6_4, 6_4), batch_size=3_2, class_mode='''binary'''
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
    )
    classifier.save('''cnn.h5''')
    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        '''dataset/single_prediction/image.png''', target_size=(6_4, 6_4)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # predict() returns a sigmoid probability, so compare against 0.5 rather
    # than testing for an exact 0 or 1 as the original did
    if result[0][0] <= 0.5:
        prediction = '''Normal'''
    else:
        prediction = '''Abnormality detected'''
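    # Hedged note (added): with class_mode='binary' the generator assigns label
    # indices alphabetically by folder name; inspect training_set.class_indices
    # (commented above) to confirm which class maps to 0 before trusting the
    # 0.5-threshold interpretation.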
| 705
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Any = IFInpaintingPipeline
A__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
A__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A__ : Dict = PipelineTesterMixin.required_optional_params - {"latents"}
def _a ( self : Any ):
"""simple docstring"""
return self._get_dummy_components()
def _a ( self : Optional[int] , _snake_case : Any , _snake_case : str=0 ):
"""simple docstring"""
if str(_snake_case ).startswith('mps' ):
A__ = torch.manual_seed(_snake_case )
else:
A__ = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self : int ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self : Optional[int] ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self : List[str] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self : Dict ):
"""simple docstring"""
self._test_save_load_local()
def _a ( self : Optional[int] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 52
| 0
|
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
_CITATION = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        """simple docstring"""
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 706
|
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
SCREAMING_SNAKE_CASE__ = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
@add_start_docstrings(_snake_case )
def __call__( self : Any , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int , **_snake_case : Optional[int] ):
"""simple docstring"""
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(_snake_case ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
F'''{processor.__class__} are passed to the logits processor.''' )
A__ = processor(_snake_case , _snake_case , _snake_case , **_snake_case )
else:
A__ = processor(_snake_case , _snake_case , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : float ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or not (temperature > 0):
raise ValueError(F'''`temperature` has to be a strictly positive float, but is {temperature}''' )
A__ = temperature
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores / self.temperature
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , _snake_case : float , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_snake_case , _snake_case ) or (min_tokens_to_keep < 1):
raise ValueError(F'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self : str , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = lax.top_k(_snake_case , scores.shape[-1] )
A__ = jnp.full_like(_snake_case , self.filter_value )
A__ = jax.nn.softmax(_snake_case , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(_snake_case , 1 )
score_mask |= score_mask.at[:, 0].set(_snake_case )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(_snake_case )
A__ = jnp.where(_snake_case , _snake_case , _snake_case )
A__ = jax.lax.sort_key_val(_snake_case , _snake_case )[-1]
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : float = -float('Inf' ) , _snake_case : int = 1 ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or top_k <= 0:
raise ValueError(F'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
A__ = max(_snake_case , _snake_case )
A__ = filter_value
def __call__( self : Optional[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(_snake_case , _snake_case )
A__ = jnp.broadcast_to((jnp.arange(_snake_case ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(_snake_case )
A__ = next_scores_flat.reshape(_snake_case , _snake_case )
return next_scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int ):
"""simple docstring"""
A__ = bos_token_id
def __call__( self : Optional[int] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.bos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , _snake_case : int , _snake_case : int ):
"""simple docstring"""
A__ = max_length
A__ = eos_token_id
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = jnp.full(scores.shape , -float('inf' ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(_snake_case , new_scores.at[:, self.eos_token_id].set(0 ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ) or min_length < 0:
raise ValueError(F'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_snake_case , _snake_case ) or eos_token_id < 0:
raise ValueError(F'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
A__ = min_length
A__ = eos_token_id
def __call__( self : int , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(_snake_case , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : Tuple , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = list(_snake_case )
A__ = begin_index
def __call__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : int ):
"""simple docstring"""
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(_snake_case , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , _snake_case )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : int , _snake_case : list ):
"""simple docstring"""
A__ = list(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
A__ = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Optional[Any] ):
"""simple docstring"""
A__ = dict(_snake_case )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(_snake_case )
A__ = jnp.intaa(_snake_case )
def __call__( self : List[Any] , _snake_case : jnp.ndarray , _snake_case : jnp.ndarray , _snake_case : int ):
"""simple docstring"""
def _force_token(_snake_case : Dict ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(_snake_case , dtype=scores.dtype ) * -float('inf' )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(_snake_case , _snake_case , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_snake_case ) , lambda: scores , ) , )
return scores
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : List[Any] ):
"""simple docstring"""
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_snake_case , 'max_initial_timestamp_index' ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Dict ):
"""simple docstring"""
A__ = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(_snake_case : Dict , _snake_case : str ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _snake_case , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , _snake_case , _snake_case )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _snake_case , _snake_case , )
return jnp.where(
_snake_case , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
A__ = jnp.where(cur_len == self.begin_index , _snake_case , _snake_case )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _snake_case , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
_snake_case , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , _snake_case , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(_snake_case , axis=-1 )
def handle_cumulative_probs(_snake_case : List[Any] , _snake_case : Union[str, Any] ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , _snake_case , )
A__ = jax.vmap(_snake_case )(_snake_case , _snake_case )
return scores
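# Hedged usage sketch (added): warpers like the temperature, top-p and top-k
# classes above are normally chained through the processor-list class (the
# first subclass above) and applied once per decoding step:
#
#   processors = <processor list class>([<temperature warper>(0.7), <top-k warper>(50)])
#   scores = processors(input_ids, scores, cur_len)
#
# In upstream transformers these are FlaxLogitsProcessorList,
# FlaxTemperatureLogitsWarper and FlaxTopKLogitsWarper; the anonymized class
# names in this file prevent a directly runnable example.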
| 52
| 0
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func , 'handle_key' , [] )
        handle += [key]
        setattr(func , 'handle_key' , handle )
        return func
    return decorator
def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func , 'handle_key' , [] )
        handle += keys
        setattr(func , 'handle_key' , handle )
        return func
    return decorator
class KeyHandler( type ):
"""simple docstring"""
    def __new__( cls , name , bases , attrs ):
        """simple docstring"""
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , 'key_handler' ):
            setattr(new_cls , 'key_handler' , {} )
        setattr(new_cls , 'handle_input' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , 'handle_key' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        """Read one key press and dispatch to the handler registered for it, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls):
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
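# Hedged usage sketch (added): binding methods to keys with the decorators and
# metaclass above. Assumes KEYMAP defines "up" and "down" entries, which is
# not shown in this file.
#
#   @register
#   class Menu:
#       @mark("up")
#       def move_up(self):
#           ...
#       @mark("down")
#       def move_down(self):
#           ...
#
#   Menu.handle_input(Menu)  # dispatches on the next key press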
| 707
|
import argparse
import struct
import unittest
class SHAaaa:
    """SHA-256 of a bytes payload (`SHAaaa` is this file's name for the SHA-256 class)."""
    def __init__(self , data: bytes ):
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
        # Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes ):
        """Pad the data to a multiple of 64 bytes, appending the bit length big-endian."""
        padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash(self ):
        """Run the SHA-256 compression function over each 64-byte block."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array (message schedule)
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror(self , value: int , rotations: int ):
        """Right-rotate a 32-bit integer by the given number of bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_match_hashes(self ):
        import hashlib
        data = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(data ).hash , hashlib.sha256(data ).hexdigest() )
def main() -> None:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
    parser.add_argument(
        '-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , 'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
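# Hedged usage sketch (added): hashing a byte string directly and checking it
# against hashlib, mirroring the unit test above.
#
#   >>> import hashlib
#   >>> SHAaaa(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
#   True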
| 52
| 0
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase ( __A ):
"""simple docstring"""
def __init__( self : Optional[int] , _snake_case : Union[str, Any] , _snake_case : int=13 , _snake_case : str=7 , _snake_case : Optional[int]=True , _snake_case : str=True , _snake_case : str=True , _snake_case : int=True , _snake_case : str=99 , _snake_case : Any=32 , _snake_case : Dict=5 , _snake_case : int=4 , _snake_case : Tuple=37 , _snake_case : Optional[int]="gelu" , _snake_case : Dict=0.1 , _snake_case : Optional[int]=0.1 , _snake_case : Tuple=5_12 , _snake_case : Optional[Any]=16 , _snake_case : Optional[int]=2 , _snake_case : int=0.02 , _snake_case : List[str]=False , _snake_case : Tuple=True , _snake_case : Optional[Any]="None" , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=4 , _snake_case : str=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = relative_attention
A__ = position_biased_input
A__ = pos_att_type
A__ = scope
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
"""simple docstring"""
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _a ( self : Optional[Any] , _snake_case : List[Any] ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _a ( self : Optional[Any] , _snake_case : List[str] , _snake_case : List[Any] , _snake_case : Dict , _snake_case : Tuple , _snake_case : Dict , _snake_case : Any , _snake_case : str ):
"""simple docstring"""
A__ = DebertaVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0]
A__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0]
A__ = model(UpperCamelCase__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _a ( self : str , _snake_case : int , _snake_case : Dict , _snake_case : List[str] , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int] ):
"""simple docstring"""
A__ = DebertaVaForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Tuple ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaVaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCamelCase__ )
def _a ( self : List[Any] , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Dict ):
"""simple docstring"""
A__ = self.num_labels
A__ = DebertaVaForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Optional[int] , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ):
"""simple docstring"""
A__ = DebertaVaForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : Dict , _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : List[str] , _snake_case : List[Any] , _snake_case : Tuple ):
"""simple docstring"""
A__ = DebertaVaForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
A__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
(
A__
) = config_and_inputs
A__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __A , __A , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
A__ : Tuple = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : Optional[int] = True
A__ : int = False
A__ : Union[str, Any] = False
A__ : List[str] = False
A__ : Optional[int] = False
def _a ( self : Tuple ):
"""simple docstring"""
A__ = DebertaVaModelTester(self )
A__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def _a ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCamelCase__ )
def _a ( self : int ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase__ )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase__ )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase__ )
def _a ( self : Any ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase__ )
def _a ( self : Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*UpperCamelCase__ )
@slow
def _a ( self : List[str] ):
"""simple docstring"""
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = DebertaVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def _a ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def _a ( self : List[str] ):
"""simple docstring"""
A__ = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
A__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
A__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
# compare the actual values for a slice.
A__ = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 708
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
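# Worked example (added, not from the original): the derivative convention in
# sigmoid_function expects an *activation* value rather than a raw input, so
# sigmoid'(x) is computed as a * (1 - a) where a = sigmoid(x).
#
#   >>> sigmoid_function(0)
#   0.5
#   >>> sigmoid_function(0.5, deriv=True)
#   0.25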
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
    print(forward_propagation(expected, number_propagations))
| 52
| 0
|
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
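# Sanity check (added; a well-known result, not asserted in the original):
# the 8-queens puzzle has exactly 92 distinct solutions. Note that `solution`
# stores references to the (since-mutated) board, so only its length is
# meaningful here.
assert len(solution) == 92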
| 709
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
A__ = AutoTokenizer.from_pretrained('google/mt5-small' )
A__ = tokenizer('Hello there' , return_tensors='np' ).input_ids
A__ = tokenizer('Hi I am' , return_tensors='np' ).input_ids
A__ = shift_tokens_right(_snake_case , model.config.pad_token_id , model.config.decoder_start_token_id )
A__ = model(_snake_case , decoder_input_ids=_snake_case ).logits
A__ = optax.softmax_cross_entropy(_snake_case , onehot(_snake_case , logits.shape[-1] ) ).mean()
A__ = -(labels.shape[-1] * loss.item())
A__ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
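# Hedged note (added): the score above converts the per-token mean
# cross-entropy into a summed sequence log-likelihood,
# score = -(seq_len * mean_loss), which is the convention the reference value
# EXPECTED_SCORE (-84.9127) was computed with; the 1e-4 tolerance absorbs
# numerical noise.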
| 52
| 0
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : List[str] , _snake_case : int , _snake_case : int ):
"""simple docstring"""
return F'''gaussian_noise_s={seed}_shape={"_".join([str(UpperCamelCase__ ) for s in shape] )}.npy'''
def _a ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
def _a ( self : Optional[Any] , _snake_case : List[str]=0 , _snake_case : Tuple=(4, 4, 64, 64) , _snake_case : Union[str, Any]=False ):
"""simple docstring"""
A__ = jnp.bfloataa if fpaa else jnp.floataa
A__ = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return image
def _a ( self : str , _snake_case : List[Any]=False , _snake_case : Optional[int]="CompVis/stable-diffusion-v1-4" ):
"""simple docstring"""
A__ = jnp.bfloataa if fpaa else jnp.floataa
A__ = 'bf16' if fpaa else None
A__ , A__ = FlaxUNetaDConditionModel.from_pretrained(
UpperCamelCase__ , subfolder='unet' , dtype=UpperCamelCase__ , revision=UpperCamelCase__ )
return model, params
def _a ( self : Tuple , _snake_case : Union[str, Any]=0 , _snake_case : List[str]=(4, 77, 7_68) , _snake_case : List[str]=False ):
"""simple docstring"""
A__ = jnp.bfloataa if fpaa else jnp.floataa
A__ = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 10_00, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _a ( self : List[str] , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : int ):
"""simple docstring"""
A__ , A__ = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=UpperCamelCase__ )
A__ = self.get_latents(UpperCamelCase__ , fpaa=UpperCamelCase__ )
A__ = self.get_encoder_hidden_states(UpperCamelCase__ , fpaa=UpperCamelCase__ )
A__ = model.apply(
{'params': params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
A__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A__ = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 10_00, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _a ( self : Union[str, Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : Tuple ):
"""simple docstring"""
A__ , A__ = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=UpperCamelCase__ )
A__ = self.get_latents(UpperCamelCase__ , shape=(4, 4, 96, 96) , fpaa=UpperCamelCase__ )
A__ = self.get_encoder_hidden_states(UpperCamelCase__ , shape=(4, 77, 10_24) , fpaa=UpperCamelCase__ )
A__ = model.apply(
{'params': params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
A__ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
A__ = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-2 )
| 710
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
    model_type = "roberta"
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __lowerCAmelCase ( OnnxConfig ):
"""simple docstring"""
    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
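# Hedged usage sketch (added): the dynamic axes above tell the ONNX exporter
# which input dimensions may vary at runtime, e.g.
#
#   onnx_config = <onnx config class>(model_config, task='sequence-classification')
#   onnx_config.inputs['input_ids']  # {0: 'batch', 1: 'sequence'}
#
# The constructor arguments follow the upstream OnnxConfig API and are an
# assumption here, since only the `inputs` property is shown in this file.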
| 52
| 0
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
def __init__( self : List[str] , _snake_case : Optional[int]=5_04_00 , _snake_case : Optional[Any]=20_48 , _snake_case : Union[str, Any]=20_48 , _snake_case : Union[str, Any]=40_96 , _snake_case : List[Any]=28 , _snake_case : str=16 , _snake_case : Optional[Any]=64 , _snake_case : str=None , _snake_case : Tuple="gelu_new" , _snake_case : List[str]=0.0 , _snake_case : Dict=0.0 , _snake_case : Dict=0.0 , _snake_case : List[Any]=1E-5 , _snake_case : str=0.02 , _snake_case : Optional[int]=True , _snake_case : Any=5_02_56 , _snake_case : Optional[Any]=5_02_56 , _snake_case : Tuple=False , **_snake_case : List[Any] , ):
"""simple docstring"""
A__ = vocab_size
A__ = n_ctx
A__ = n_positions
A__ = n_embd
A__ = n_layer
A__ = n_head
A__ = n_inner
A__ = rotary_dim
A__ = activation_function
A__ = resid_pdrop
A__ = embd_pdrop
A__ = attn_pdrop
A__ = layer_norm_epsilon
A__ = initializer_range
A__ = use_cache
A__ = bos_token_id
A__ = eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , tie_word_embeddings=lowerCAmelCase_ , **lowerCAmelCase_ )
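# ---------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): PretrainedConfig resolves the
# aliases declared in attribute_map above, so GPT-2-style and CodeGen-style names
# stay interchangeable on a config instance.
def _demo_codegen_attribute_map():
    config = CodeGenConfig(n_embd=1024, n_layer=12)
    assert config.hidden_size == config.n_embd == 1024        # alias via attribute_map
    assert config.num_hidden_layers == config.n_layer == 12   # alias via attribute_map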
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
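# ---------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): generating ONNX dummy inputs from
# the config above. The checkpoint name and AutoTokenizer import are illustrative
# assumptions; the shape arithmetic follows generate_dummy_inputs (past length = seq + 2).
def _demo_codegen_onnx_dummy_inputs():
    from transformers import AutoTokenizer

    onnx_config = CodeGenOnnxConfig(CodeGenConfig(), use_past=True)
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    dummy = onnx_config.generate_dummy_inputs(
        tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
    )
    # Mask covers current tokens plus the dummy past: 8 + (8 + 2) = 18 positions.
    assert dummy["attention_mask"].shape == (2, 18)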
# ===== dataset row boundary (remaining row fields: 711) =====
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # Attribute names restored; the mixin base is the imported TokenizerTesterMixin.
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        # Helper name restored descriptively; the expected ids include bos (0) and
        # eos (2), so special tokens must be added.
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Boolean flag combinations restored from the upstream test this file mirrors;
        # they are consistent with the offset arithmetic asserted below.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
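# ---------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the offset arithmetic the last
# test asserts, shown concretely. Assumes network access to the hub checkpoint.
def _demo_offset_mapping():
    from transformers import LongformerTokenizerFast

    tokenizer = LongformerTokenizerFast.from_pretrained(
        "allenai/longformer-base-4096", add_prefix_space=True, trim_offsets=True
    )
    encoding = tokenizer("hello hello", return_offsets_mapping=True, add_special_tokens=False)
    # Offsets index into the raw string: first "hello" at 0-5, second at 6-11,
    # with the separating space trimmed out of both spans.
    assert encoding.offset_mapping == [(0, 5), (6, 11)]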
# ===== dataset row boundary (remaining row fields: 52, 0) =====
"""Logging utilities."""

import functools
import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib

_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level
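# ---------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the env override above in action.
# Running e.g. `TRANSFORMERS_VERBOSITY=error python script.py` maps "error" through
# log_levels before any logger is configured. A self-contained simulation:
def _demo_env_verbosity():
    os.environ["TRANSFORMERS_VERBOSITY"] = "error"
    assert _get_default_logging_level() == logging.ERROR
    os.environ.pop("TRANSFORMERS_VERBOSITY")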
def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the library root logger on first use."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    # Fixed: the source asserted `handler not in ...handlers`, which would reject
    # every handler that is actually attached; removal requires membership.
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Identical to logger.warning(), but a no-op when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Identical to logger.warning(), but a warning with the same message is only emitted once."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
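# ---------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): because warning_once is wrapped
# in functools.lru_cache, a second call with the same logger and message is a cache
# hit and never reaches self.warning.
def _demo_warning_once():
    logger = get_logger(__name__)
    logger.warning_once("emitted a single time")
    logger.warning_once("emitted a single time")  # cached: nothing is printed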
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
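# ---------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): typical verbosity control from
# user code, combining the helpers defined above.
def _demo_verbosity_control():
    set_verbosity_info()                  # library root logger now at INFO
    assert get_verbosity() == INFO
    set_verbosity_error()                 # silence everything below ERROR
    assert get_verbosity() == ERROR
    disable_progress_bar()                # tqdm(...) now returns the no-op EmptyTqdm
    assert not is_progress_bar_enabled()
    enable_progress_bar()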
# ===== dataset row boundary (remaining row fields: 712) =====
# Function, fixture, and parameter names restored from usage; the pytest hook names
# (pytest_collection_modifyitems, pytest_configure, pytest_plugins) are fixed by pytest.
import pytest

import datasets

# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))

    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
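# ---------------------------------------------------------------------------------
# Editor's sketch (not part of the original file): what the autouse cache fixture
# buys a test -- every test sees cache paths under pytest's base temp directory, so
# the suite never touches the user's real Hugging Face cache.
def _demo_isolated_cache():
    import datasets.config

    assert "cache" in str(datasets.config.HF_DATASETS_CACHE)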