| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """simple docstring"""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 79 |
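A minimal usage sketch for the score-SDE pipeline above, assuming the public diffusers `ScoreSdeVePipeline` API; the checkpoint id is one published score-SDE VE checkpoint and is only an illustrative choice:

import torch
from diffusers import ScoreSdeVePipeline

# Checkpoint id is an assumption; any compatible UNet2DModel/ScoreSdeVeScheduler pair works.
pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe(batch_size=1, num_inference_steps=2000).images[0]
image.save("sde_ve_sample.png")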
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        '''simple docstring'''
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
def lowerCAmelCase ( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any ):
'''simple docstring'''
_A = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1
# get prompt text embeddings
_A = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
_A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_A = text_input_ids[:, : self.tokenizer.model_max_length]
_A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_A = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_A = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_A = self.learned_classifier_free_sampling_embeddings.embeddings
_A = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 )
else:
_A = [""] * batch_size
_A = text_input_ids.shape[-1]
_A = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , )
_A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_A = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_A = negative_prompt_embeds.shape[1]
_A = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 )
_A = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[Any] , __UpperCAmelCase : Union[str, List[str]] , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 5.0 , __UpperCAmelCase : float = 1.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = 1
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_A = len(__UpperCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}''' )
_A = batch_size * num_images_per_prompt
_A = guidance_scale > 1.0
_A = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__UpperCAmelCase )}.''' )
# get the initial completely masked latents unless the user supplied it
_A = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_A = self.transformer.num_vector_embeds - 1
_A = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
_A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device )
_A = self.scheduler.timesteps.to(self.device )
_A = latents
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the sample if we are doing classifier free guidance
_A = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_A = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample
if do_classifier_free_guidance:
_A , _A = model_output.chunk(2 )
_A = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase )
_A = self.truncate(__UpperCAmelCase , __UpperCAmelCase )
# remove `log(0)`'s (`-inf`s)
_A = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_A = self.vqvae.config.vq_embed_dim
_A = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_A = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase )
_A = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample
_A = (image / 2 + 0.5).clamp(0 , 1 )
_A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_A = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : float ):
'''simple docstring'''
_A , _A = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase )
_A = torch.exp(__UpperCAmelCase )
_A = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_A = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase )
_A = torch.cat((all_true, keep_mask) , dim=1 )
_A = keep_mask[:, :-1, :]
_A = keep_mask.gather(1 , indices.argsort(1 ) )
_A = log_p_x_0.clone()
_A = -torch.inf # -inf = log(0)
return rv
| 79 | 1 |
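The guidance arithmetic inside the VQ-Diffusion `__call__` above is ordinary classifier-free guidance applied in log-probability space. A self-contained sketch of that combination step; the function and variable names are illustrative, not part of the pipeline:

import torch

def apply_classifier_free_guidance(model_output: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The pipeline stacks unconditional and conditional inputs along the batch
    # dimension (torch.cat([sample] * 2)), so the prediction splits in two.
    model_output_uncond, model_output_text = model_output.chunk(2)
    guided = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
    # Renormalize so the result stays a valid log-distribution, mirroring the
    # pipeline's logsumexp step.
    return guided - torch.logsumexp(guided, dim=1, keepdim=True)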
'''simple docstring'''
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 370 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/nllb-large-en-ro""": 10_24,
"""facebook/nllb-200-distilled-600M""": 10_24,
}
# fmt: off
lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = VOCAB_FILES_NAMES
__magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ = ["input_ids", "attention_mask"]
__magic_name__ = NllbTokenizer
__magic_name__ = []
__magic_name__ = []
def __init__( self , snake_case__=None , snake_case__=None , snake_case__="<s>" , snake_case__="</s>" , snake_case__="</s>" , snake_case__="<s>" , snake_case__="<unk>" , snake_case__="<pad>" , snake_case__="<mask>" , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=False , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
_lowerCAmelCase : Dict = legacy_behaviour
super().__init__(
vocab_file=snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , src_lang=snake_case__ , tgt_lang=snake_case__ , additional_special_tokens=snake_case__ , legacy_behaviour=snake_case__ , **snake_case__ , )
_lowerCAmelCase : List[str] = vocab_file
_lowerCAmelCase : int = False if not self.vocab_file else True
_lowerCAmelCase : str = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase : Any = {
lang_code: self.convert_tokens_to_ids(snake_case__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[Any] = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase : str = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : Tuple = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a ( self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
_lowerCAmelCase : str = [self.sep_token_id]
_lowerCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase : Optional[Any] = src_lang
_lowerCAmelCase : Union[str, Any] = self(snake_case__ , add_special_tokens=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
_lowerCAmelCase : int = self.convert_tokens_to_ids(snake_case__ )
_lowerCAmelCase : Optional[Any] = tgt_lang_id
return inputs
def a ( self , snake_case__ , snake_case__ = "eng_Latn" , snake_case__ = None , snake_case__ = "fra_Latn" , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seq2seq_batch(snake_case__ , snake_case__ , **snake_case__ )
def a ( self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def a ( self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : int = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.convert_tokens_to_ids(snake_case__ )
if self.legacy_behaviour:
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : List[str] = [self.eos_token_id]
_lowerCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def a ( self , snake_case__ , snake_case__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 25 | 0 |
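The tokenizer sample above is transformers' fast NLLB tokenizer. A minimal translation sketch, assuming the public transformers API; the target-language token id is looked up with `convert_tokens_to_ids`, since the language codes are registered as special tokens:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("The weather is nice today.", return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),  # target language code
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))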
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """simple docstring"""

    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
| 100 |
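The `_generate_tables` method above streams a Parquet file in record batches rather than loading it whole. A standalone sketch of that read pattern using only pyarrow; the file name is hypothetical:

import pyarrow as pa
import pyarrow.parquet as pq

with open("train-00000-of-00001.parquet", "rb") as f:  # hypothetical file
    parquet_file = pq.ParquetFile(f)
    for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
        table = pa.Table.from_batches([record_batch])
        print(batch_idx, table.num_rows, table.column_names)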
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowercase__ :List[Any] = logging.get_logger(__name__)
lowercase__ :Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class lowercase ( SCREAMING_SNAKE_CASE__ ):
lowercase_ : str =VOCAB_FILES_NAMES
lowercase_ : Dict =PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Optional[int] =PRETRAINED_INIT_CONFIGURATION
lowercase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Any =BertTokenizer
def __init__( self ,A__=None ,A__=None ,A__=True ,A__="[UNK]" ,A__="[SEP]" ,A__="[PAD]" ,A__="[CLS]" ,A__="[MASK]" ,A__=True ,A__=None ,**A__ ,):
super().__init__(
A__ ,tokenizer_file=A__ ,do_lower_case=A__ ,unk_token=A__ ,sep_token=A__ ,pad_token=A__ ,cls_token=A__ ,mask_token=A__ ,tokenize_chinese_chars=A__ ,strip_accents=A__ ,**A__ ,)
lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('''lowercase''' ,A__) != do_lower_case
or normalizer_state.get('''strip_accents''' ,A__) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' ,A__) != tokenize_chinese_chars
):
lowercase = getattr(A__ ,normalizer_state.pop('''type'''))
lowercase = do_lower_case
lowercase = strip_accents
lowercase = tokenize_chinese_chars
lowercase = normalizer_class(**A__)
lowercase = do_lower_case
def A__ ( self ,A__ ,A__=None):
lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self ,A__ ,A__ = None):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def A__ ( self ,A__ ,A__ = None):
lowercase = self._tokenizer.model.save(A__ ,name=A__)
return tuple(A__)
| 101 | 0 |
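The `create_token_type_ids_from_sequences` logic above assigns segment id 0 to `[CLS] A [SEP]` and 1 to `B [SEP]`. A short demonstration, assuming the public transformers API:

from transformers import BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tokenizer("How old are you?", "I am six years old.")
# token_type_ids: 0 for "[CLS] A [SEP]", 1 for "B [SEP]"
print(enc["token_type_ids"])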
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    '''simple docstring'''
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    '''simple docstring'''

    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
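For reference, the identity the first test above checks: for the block matrix $M = \begin{pmatrix} A & B \\ B^{T} & C \end{pmatrix}$ with invertible $A$, the Schur complement of $A$ is $S = C - B^{T} A^{-1} B$, and $\det(M) = \det(A)\,\det(S)$, which is exactly the determinant assertion in `test_schur_complement`.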
| 371 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :List[str] = torch.nn.Linear(2 , 4 )
__UpperCamelCase :Any = torch.optim.AdamW(model.parameters() , lr=1.0 )
__UpperCamelCase :List[Any] = torch.optim.lr_scheduler.OneCycleLR(SCREAMING_SNAKE_CASE , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__UpperCamelCase :List[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__UpperCamelCase :Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Union[str, Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(SCREAMING_SNAKE_CASE )
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
@require_cuda
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Dict = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__lowercase):
__UpperCamelCase :Any = Accelerator(cpu=__lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[Any] = Accelerator()
__UpperCamelCase :List[Any] = GradientState()
assert state.num_steps == 1
__UpperCamelCase :Any = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__UpperCamelCase :int = False
assert state.sync_gradients is False
GradientState._reset_state()
def UpperCamelCase__ ( self) -> Dict:
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :str = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[Any] = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def UpperCamelCase__ ( self) -> Union[str, Any]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__lowercase , **__lowercase):
pass
with patch('''torch.cuda.set_device''' , __lowercase), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64'''):
__UpperCamelCase :Optional[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device) , '''cuda:64''')
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :List[Any] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
__UpperCamelCase :Tuple = get_signature(__lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase)
# make sure random weights don't match
load_random_weights(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) > 1E-3)
# make sure loaded weights match
accelerator.load_state(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) < 1E-3)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[Any] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
__UpperCamelCase :Any = get_signature(__lowercase)
# saving hook
def save_config(__lowercase , __lowercase , __lowercase):
__UpperCamelCase :Union[str, Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(__lowercase , '''data.json''') , '''w''') as f:
json.dump(__lowercase , __lowercase)
# loading hook
def load_config(__lowercase , __lowercase):
with open(os.path.join(__lowercase , '''data.json''') , '''r''') as f:
__UpperCamelCase :Dict = json.load(__lowercase)
__UpperCamelCase :Dict = config['''class_name''']
__UpperCamelCase :Union[str, Any] = accelerator.register_save_state_pre_hook(__lowercase)
__UpperCamelCase :Any = accelerator.register_load_state_pre_hook(__lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase)
# make sure random weights don't match with hooks
load_random_weights(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) > 1E-3)
# random class name to verify correct one is loaded
__UpperCamelCase :int = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) < 1E-3)
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase)
# make sure random weights don't match with hooks removed
load_random_weights(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) > 1E-3)
# random class name to verify correct one is loaded
__UpperCamelCase :Dict = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(__lowercase)
self.assertTrue(abs(model_signature - get_signature(__lowercase)) < 1E-3)
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Optional[Any] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Union[str, Any] = create_components()
__UpperCamelCase :Optional[Any] = None
# This should work
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
self.assertTrue(dummy_obj is None)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[str] = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Any = create_components()
__UpperCamelCase :Dict = [1, 2, 3]
# This should work
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Tuple = accelerator.prepare(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase)
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__lowercase , '''_is_accelerate_prepared''' , __lowercase) , __lowercase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def UpperCamelCase__ ( self) -> int:
from transformers import AutoModelForCausalLM
__UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_8bit=__lowercase , device_map={'''''': 0} , )
__UpperCamelCase :Optional[Any] = Accelerator()
# This should work
__UpperCamelCase :int = accelerator.prepare(__lowercase)
@slow
@require_bnb
def UpperCamelCase__ ( self) -> List[str]:
from transformers import AutoModelForCausalLM
__UpperCamelCase :str = Accelerator()
with init_empty_weights():
__UpperCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__UpperCamelCase :List[str] = infer_auto_device_map(__lowercase)
__UpperCamelCase :str = '''cpu'''
__UpperCamelCase :List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=__lowercase , load_in_8bit=__lowercase , llm_int8_enable_fp32_cpu_offload=__lowercase)
# This should not work and get value error
with self.assertRaises(__lowercase):
__UpperCamelCase :Union[str, Any] = accelerator.prepare(__lowercase)
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase__ ( self) -> Union[str, Any]:
from transformers import AutoModelForCausalLM
__UpperCamelCase :int = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__UpperCamelCase :Tuple = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__UpperCamelCase :int = infer_auto_device_map(__lowercase)
__UpperCamelCase :List[Any] = 1
__UpperCamelCase :int = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_8bit=__lowercase , device_map=__lowercase , )
__UpperCamelCase :Dict = Accelerator()
# This should not work and get value error
with self.assertRaises(__lowercase):
__UpperCamelCase :Any = accelerator.prepare(__lowercase)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def UpperCamelCase__ ( self) -> Dict:
from transformers import AutoModelForCausalLM
with init_empty_weights():
__UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__UpperCamelCase :List[str] = infer_auto_device_map(__lowercase)
__UpperCamelCase :Optional[int] = 1
__UpperCamelCase :Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_8bit=__lowercase , device_map=__lowercase , )
__UpperCamelCase :int = Accelerator()
# This should work
__UpperCamelCase :int = accelerator.prepare(__lowercase)
@require_cuda
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :Tuple = torch.nn.Linear(10 , 10)
__UpperCamelCase :Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.01)
__UpperCamelCase :Any = Accelerator(cpu=__lowercase)
__UpperCamelCase :Tuple = accelerator.prepare(__lowercase)
| 105 | 0 |
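The tests above exercise `Accelerator.prepare` and `accelerator.backward`. A minimal end-to-end training-loop sketch, assuming the public accelerate API; the model, data, and hyperparameters are illustrative:

import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader, TensorDataset

accelerator = Accelerator()
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
loader = DataLoader(TensorDataset(torch.randn(8, 2), torch.randn(8, 4)), batch_size=4)

model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
for x, y in loader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()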
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_lowerCAmelCase : Optional[Any] = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_lowerCAmelCase : Optional[Any] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_lowerCAmelCase : Optional[Any] = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
def __magic_name__ ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 218 |
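For reference, the quantity the metric above computes: with modified $n$-gram precisions $p_n$, uniform weights $w_n = 1/N$ (where $N$ is `max_order`, default 4), candidate length $c$, and effective reference length $r$,

$$\mathrm{BLEU} = \mathrm{BP} \cdot \exp\Big(\sum_{n=1}^{N} w_n \log p_n\Big), \qquad \mathrm{BP} = \begin{cases} 1 & \text{if } c > r \\ e^{\,1 - r/c} & \text{if } c \le r. \end{cases}$$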
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt-neox-20b": 20_48,
}
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="<|endoftext|>" , lowerCamelCase__="<|endoftext|>" , lowerCamelCase__="<|endoftext|>" , lowerCamelCase__=False , **lowerCamelCase__ , ) -> int:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , unk_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
__lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space:
__lowerCamelCase = getattr(lowerCamelCase__ , pre_tok_state.pop('type' ) )
__lowerCamelCase = add_prefix_space
__lowerCamelCase = pre_tok_class(**lowerCamelCase__ )
__lowerCamelCase = add_prefix_space
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
__lowerCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> List[int]:
'''simple docstring'''
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ ) + [self.eos_token_id] )
if len(lowerCamelCase__ ) > self.model_max_length:
__lowerCamelCase = input_ids[-self.model_max_length :]
return input_ids
| 90 | 0 |
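A minimal usage sketch for the fast GPT-NeoX tokenizer above, assuming the public transformers API:

from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tokenizer("Hello world").input_ids
print(ids)
print(tokenizer.decode(ids))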
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    '''simple docstring'''
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")
    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")
    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name
def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
'''simple docstring'''
for key in checkpoint.copy().keys():
SCREAMING_SNAKE_CASE__ : int = checkpoint.pop(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val
return checkpoint
def _a ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
def _a ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="cpu" )["model"]
SCREAMING_SNAKE_CASE__ : Dict = EfficientFormerConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = EfficientFormerForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = config.depths[-1] - config.num_metaad_blocks + 1
SCREAMING_SNAKE_CASE__ : List[Any] = convert_torch_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : int = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2_56
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2_24
SCREAMING_SNAKE_CASE__ : Optional[Any] = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
SCREAMING_SNAKE_CASE__ : int = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" ).pixel_values
# original processing pipeline
SCREAMING_SNAKE_CASE__ : List[str] = Compose(
[
Resize(SCREAMING_SNAKE_CASE__ , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(SCREAMING_SNAKE_CASE__ ),
ToTensor(),
Normalize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),
] )
SCREAMING_SNAKE_CASE__ : List[Any] = image_transforms(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.logits
SCREAMING_SNAKE_CASE__ : int = (1, 10_00)
if "l1" in model_name:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE__ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
SCREAMING_SNAKE_CASE__ : List[str] = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7''' )
# Save Checkpoints
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
_lowerCamelCase : Tuple = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 191 |
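# A minimal sketch of the pop-and-reinsert renaming idiom used by the checkpoint
# converter above; `rename_key` is a stand-in for the EfficientFormer-specific
# rule set, not the real rules.
import torch

def rename_key(old_name: str) -> str:
    # illustrative rule only: map a timm-style patch-embed key
    return old_name.replace("patch_embed.0", "patch_embed.convolution1")

def rename_state_dict(checkpoint: dict) -> dict:
    for key in list(checkpoint.keys()):
        checkpoint[rename_key(key)] = checkpoint.pop(key)
    return checkpoint

renamed = rename_state_dict({"patch_embed.0.weight": torch.zeros(1)})
assert "patch_embed.convolution1.weight" in renamed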
def _a ( SCREAMING_SNAKE_CASE__ : int ) -> str:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
if num < 0:
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = -num
SCREAMING_SNAKE_CASE__ : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(SCREAMING_SNAKE_CASE__ ) for e in binary )
return "0b" + "".join(str(SCREAMING_SNAKE_CASE__ ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 191 | 1 |
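# A de-obfuscated, runnable reference version of the decimal-to-binary routine in
# the snippet above; the type checks, sign handling and "0b" prefix match its intent.
def decimal_to_binary(num: int) -> str:
    """
    >>> decimal_to_binary(5)
    '0b101'
    >>> decimal_to_binary(-2)
    '-0b10'
    >>> decimal_to_binary(0)
    '0b0'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = num < 0
    num = abs(num)
    bits = []
    while num > 0:
        bits.insert(0, num % 2)
        num >>= 1
    return ("-0b" if negative else "0b") + "".join(str(b) for b in bits)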
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase : Union[str, Any] = 1_6
lowerCamelCase : int = 3_2
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = 16 ,lowercase = "bert-base-cased" ) -> Dict:
snake_case : List[str] = AutoTokenizer.from_pretrained(lowercase )
snake_case : Union[str, Any] = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
snake_case : List[Any] = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=lowercase ,max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case : List[Any] = datasets.map(
lowercase ,batched=lowercase ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case : Optional[Any] = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase ,padding="""max_length""" ,max_length=128 ,return_tensors="""pt""" )
return tokenizer.pad(lowercase ,padding="""longest""" ,return_tensors="""pt""" )
# Instantiate dataloaders.
snake_case : List[str] = DataLoader(
tokenized_datasets["""train"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
snake_case : List[Any] = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=lowercase ,collate_fn=lowercase ,batch_size=lowercase )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[Any]:
snake_case : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case : Union[str, Any] = config["""lr"""]
snake_case : List[str] = int(config["""num_epochs"""] )
snake_case : str = int(config["""seed"""] )
snake_case : Dict = int(config["""batch_size"""] )
snake_case : Union[str, Any] = args.model_name_or_path
set_seed(lowercase )
snake_case : Dict = get_dataloaders(lowercase ,lowercase ,lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case : List[str] = AutoModelForSequenceClassification.from_pretrained(lowercase ,return_dict=lowercase )
# Instantiate optimizer
snake_case : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case : Optional[Any] = optimizer_cls(params=model.parameters() ,lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
snake_case : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
snake_case : Any = 1
snake_case : Dict = (len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case : List[Any] = get_linear_schedule_with_warmup(
optimizer=lowercase ,num_warmup_steps=0 ,num_training_steps=lowercase ,)
else:
snake_case : Any = DummyScheduler(lowercase ,total_num_steps=lowercase ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case : str = accelerator.prepare(
lowercase ,lowercase ,lowercase ,lowercase ,lowercase )
# We need to keep track of how many total steps we have iterated over
snake_case : int = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case : Tuple = 0
# Now we train the model
snake_case : Any = evaluate.load("""glue""" ,"""mrpc""" )
snake_case : Tuple = 0
snake_case : List[Any] = {}
for epoch in range(lowercase ,lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
snake_case : List[str] = model(**lowercase )
snake_case : Dict = outputs.loss
snake_case : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
snake_case : str = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case : Optional[int] = model(**lowercase )
snake_case : List[Any] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case : Optional[int] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
snake_case : Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case : List[str] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase ,references=lowercase ,)
snake_case : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" ,lowercase )
snake_case : Dict = eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
snake_case : str = eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,"""all_results.json""" ) ,"""w""" ) as f:
json.dump(lowercase ,lowercase )
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
snake_case : List[str] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" ,type=lowercase ,default="""bert-base-cased""" ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=lowercase ,)
parser.add_argument(
"""--output_dir""" ,type=lowercase ,default=""".""" ,help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,)
parser.add_argument(
"""--performance_lower_bound""" ,type=lowercase ,default=lowercase ,help="""Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.""" ,)
parser.add_argument(
"""--num_epochs""" ,type=lowercase ,default=3 ,help="""Number of train epochs.""" ,)
snake_case : str = parser.parse_args()
snake_case : Any = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase ,lowercase )
if __name__ == "__main__":
main()
| 124 |
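# Hedged sketch of the DeepSpeed-aware optimizer choice in the training function
# above: fall back to Accelerate's DummyOptim only when the DeepSpeed config
# itself supplies an optimizer section. The one-parameter model is a stand-in.
import torch
from torch.optim import AdamW
from accelerate import Accelerator
from accelerate.utils.deepspeed import DummyOptim

accelerator = Accelerator()
plugin = accelerator.state.deepspeed_plugin
use_dummy = plugin is not None and "optimizer" in plugin.deepspeed_config
optimizer_cls = DummyOptim if use_dummy else AdamW
optimizer = optimizer_cls(params=[torch.nn.Parameter(torch.zeros(1))], lr=2e-5)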
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class _SCREAMING_SNAKE_CASE ( _a ):
def __init__( self : List[Any] , __lowerCamelCase : Callable , __lowerCamelCase : Optional[Features] = None , __lowerCamelCase : str = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[dict] = None , __lowerCamelCase : Optional[int] = None , **__lowerCamelCase : List[Any] , ):
super().__init__(
features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
UpperCamelCase :Union[str, Any] = Generator(
cache_dir=__lowerCamelCase , features=__lowerCamelCase , generator=__lowerCamelCase , gen_kwargs=__lowerCamelCase , **__lowerCamelCase , )
def _A ( self : List[str] ):
# Build iterable dataset
if self.streaming:
UpperCamelCase :Any = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
UpperCamelCase :Tuple = None
UpperCamelCase :Dict = None
UpperCamelCase :Dict = None
UpperCamelCase :List[str] = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
UpperCamelCase :Tuple = self.builder.as_dataset(
split="""train""" , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
| 38 | 0 |
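# Usage sketch for the generator-backed reader above, through the public
# Dataset.from_generator API that builds on it.
from datasets import Dataset

def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}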
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : str = LxmertConfig.from_json_file(__a )
print(F"""Building PyTorch model from configuration: {config}""" )
_lowerCamelCase : Union[str, Any] = LxmertForPreTraining(__a )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(__a , __a , __a )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCAmelCase : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 355 |
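# Example invocation of the converter above; the script name and all paths are
# placeholders, not values taken from this file:
#   python convert_lxmert_checkpoint.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin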
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_( _lowerCamelCase ) -> bool:
'''simple docstring'''
_lowerCamelCase : int = str(_lowerCamelCase )
return len(_lowerCamelCase ) == 9 and set(_lowerCamelCase ) == set("123456789" )
def lowerCamelCase_( ) -> int | None:
'''simple docstring'''
for base_num in range(9999 , 4999 , -1 ):
_lowerCamelCase : Union[str, Any] = 100002 * base_num
if is_9_pandigital(_lowerCamelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
_lowerCamelCase : Tuple = 1002003 * base_num
if is_9_pandigital(_lowerCamelCase ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
| 340 | 0 |
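# Worked example of the pandigital property tested above: concatenating
# 192 * 1, 192 * 2 and 192 * 3 gives 192384576, which uses each of the
# digits 1-9 exactly once.
digits = "".join(str(192 * k) for k in (1, 2, 3))
assert len(digits) == 9 and set(digits) == set("123456789")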
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase_ ( __snake_case ) -> List[Any]:
"""simple docstring"""
_lowercase =FileLock(str(tmpdir / '''foo.lock''' ) )
_lowercase =FileLock(str(tmpdir / '''foo.lock''' ) )
_lowercase =0.01
with locka.acquire():
with pytest.raises(__snake_case ):
_lowercase =time.time()
locka.acquire(__snake_case )
assert time.time() - _start > timeout
def UpperCAmelCase_ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_lowercase ='''a''' * 1000 + '''.lock'''
_lowercase =FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(__snake_case )
assert len(os.path.basename(locka._lock_file ) ) <= 255
_lowercase =FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__snake_case ):
locka.acquire(0 )
| 5 |
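# Minimal sketch of the behaviour the tests above exercise: a second acquire on
# the same lock file raises Timeout while the first holder keeps the lock.
from datasets.utils.filelock import FileLock, Timeout

lock_a = FileLock("demo.lock")
lock_b = FileLock("demo.lock", timeout=0.01)
with lock_a.acquire():
    try:
        lock_b.acquire()
    except Timeout:
        print("demo.lock is already held")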
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__snake_case :Dict = '''bart'''
__snake_case :Tuple = True
@st.cache(allow_output_mutation=_UpperCAmelCase )
def __snake_case ( ):
if LOAD_DENSE_INDEX:
__a = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__a = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__a = qar_model.eval()
else:
__a , __a = (None, None)
if MODEL_TYPE == "bart":
__a = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__a = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__a = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__a = sas_model.eval()
else:
__a , __a = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def __snake_case ( ):
if LOAD_DENSE_INDEX:
__a = faiss.StandardGpuResources()
__a = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train''']
__a = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__a = faiss.IndexFlatIP(128 )
__a = faiss.index_cpu_to_gpu(_UpperCAmelCase , 1 , _UpperCAmelCase )
wikiaab_gpu_index_flat.add(_UpperCAmelCase ) # TODO fix for larger GPU
else:
__a , __a = (None, None)
__a = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_UpperCAmelCase )
def __snake_case ( ):
__a = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__a = elia['''train_eli5''']
__a = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__a = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_UpperCAmelCase )
return (elia_train, eli5_train_q_index)
__snake_case ,__snake_case ,__snake_case :List[str] = load_indexes()
__snake_case ,__snake_case ,__snake_case ,__snake_case :Dict = load_models()
__snake_case ,__snake_case :Tuple = load_train_data()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=10 ):
__a = embed_questions_for_retrieval([question] , _UpperCAmelCase , _UpperCAmelCase )
__a , __a = eli5_train_q_index.search(_UpperCAmelCase , _UpperCAmelCase )
__a = [elia_train[int(_UpperCAmelCase )] for i in I[0]]
return nn_examples
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase="wiki40b" , _UpperCAmelCase="dense" , _UpperCAmelCase=10 ):
if source == "none":
__a , __a = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__a , __a = query_qa_dense_index(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__a , __a = query_es_index(
_UpperCAmelCase , _UpperCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=_UpperCAmelCase , )
__a = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__a = '''question: {} context: {}'''.format(_UpperCAmelCase , _UpperCAmelCase )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _UpperCAmelCase : None),
} )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase=256 , _UpperCAmelCase=False , _UpperCAmelCase=2 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0.8 ):
with torch.no_grad():
__a = qa_sas_generate(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , num_answers=1 , num_beams=_UpperCAmelCase , min_len=_UpperCAmelCase , max_len=_UpperCAmelCase , do_sample=_UpperCAmelCase , temp=_UpperCAmelCase , top_p=_UpperCAmelCase , top_k=_UpperCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
__snake_case :Dict = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
__snake_case :int = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__snake_case :int = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages for the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed, fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
__snake_case :Union[str, Any] = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
__snake_case :int = st.sidebar.checkbox('''Demo options''')
if demo_options:
__snake_case :str = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
__snake_case :Tuple = action_list.index(action_st)
__snake_case :Optional[int] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
__snake_case :Dict = show_type == '''Show full text of passages'''
else:
__snake_case :Dict = 3
__snake_case :str = True
__snake_case :Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
__snake_case :List[str] = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between question and passage embeddings
trained on the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model which takes the question and the retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
__snake_case :Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
__snake_case :Optional[int] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
__snake_case :Optional[int] = '''wiki40b'''
__snake_case :Dict = '''dense'''
__snake_case :Dict = '''beam'''
__snake_case :int = 2
__snake_case :str = 64
__snake_case :Tuple = 256
__snake_case :int = None
__snake_case :List[Any] = None
__snake_case :int = st.sidebar.checkbox('''Generation options''')
if generate_options:
__snake_case :Tuple = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
__snake_case :Tuple = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
__snake_case :Dict = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__snake_case :Dict = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__snake_case :List[str] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__snake_case :Tuple = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
__snake_case :Any = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
__snake_case :Any = None
# start main text
__snake_case :Dict = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
__snake_case :int = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__snake_case :Optional[int] = st.text_input('''Enter your question here:''', '''''')
else:
__snake_case :Optional[int] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
__snake_case ,__snake_case :int = make_support(question, source=wiki_source, method='''dense''', n_results=10)
__snake_case ,__snake_case :Optional[int] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
__snake_case :Optional[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__snake_case :Union[str, Any] = support_list[:10]
__snake_case :Optional[int] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
__snake_case ,__snake_case :Tuple = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__snake_case ,__snake_case :Optional[int] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
__snake_case :Dict = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
__snake_case :int = res[1].strip()
if sec_titles == "":
__snake_case :List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
__snake_case :Optional[int] = sec_titles.split(''' & ''')
__snake_case :str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
__snake_case :str = find_nearest_training(question)
__snake_case :str = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
__snake_case :Optional[Any] = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
__snake_case :Tuple = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 49 | 0 |
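# Hedged sketch of the dense-retrieval core the app above relies on: embed a
# query, then take the top-k inner-product neighbours from a flat FAISS index.
# Random vectors stand in for real passage and question embeddings.
import faiss
import numpy as np

index = faiss.IndexFlatIP(128)
index.add(np.random.rand(1000, 128).astype("float32"))
query = np.random.rand(1, 128).astype("float32")
scores, ids = index.search(query, 10)  # ids[0] holds the top-10 passage rows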
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :Optional[Any] ) -> Dict:
__UpperCamelCase : Tuple = 0
def _lowerCamelCase ( self :Optional[Any] ) -> Optional[Any]:
__UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self :Any ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : Dict = Path(a ) / "preprocessor_config.json"
__UpperCamelCase : List[Any] = Path(a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(a , "w" ) , )
json.dump({"model_type": "clip"} , open(a , "w" ) )
__UpperCamelCase : Tuple = AutoImageProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self :int ) -> str:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : Any = Path(a ) / "preprocessor_config.json"
__UpperCamelCase : str = Path(a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(a , "w" ) , )
json.dump({"model_type": "clip"} , open(a , "w" ) )
__UpperCamelCase : str = AutoImageProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : int = CLIPConfig()
# Create a dummy config file with image_processor_type
__UpperCamelCase : Any = Path(a ) / "preprocessor_config.json"
__UpperCamelCase : List[Any] = Path(a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(a , "w" ) , )
json.dump({"model_type": "clip"} , open(a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__UpperCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained(a ).to_dict()
config_dict.pop("image_processor_type" )
__UpperCamelCase : Dict = CLIPImageProcessor(**a )
# save in new folder
model_config.save_pretrained(a )
config.save_pretrained(a )
__UpperCamelCase : Any = AutoImageProcessor.from_pretrained(a )
# make sure private variable is not incorrectly saved
__UpperCamelCase : Dict = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self :Optional[Any] ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : Tuple = Path(a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(a , "w" ) , )
__UpperCamelCase : str = AutoImageProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Optional[Any]:
with self.assertRaisesRegex(
a , "clip-base is not a local folder and is not a valid model identifier" ):
__UpperCamelCase : Any = AutoImageProcessor.from_pretrained("clip-base" )
def _lowerCamelCase ( self :Tuple ) -> Optional[Any]:
with self.assertRaisesRegex(
a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__UpperCamelCase : Dict = AutoImageProcessor.from_pretrained(a , revision="aaaaaa" )
def _lowerCamelCase ( self :Optional[Any] ) -> int:
with self.assertRaisesRegex(
a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
__UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowerCamelCase ( self :Dict ) -> str:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
__UpperCamelCase : Dict = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
__UpperCamelCase : Optional[int] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a )
__UpperCamelCase : List[Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a )
__UpperCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(a , trust_remote_code=a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def _lowerCamelCase ( self :Dict ) -> Dict:
try:
AutoConfig.register("custom" , a )
AutoImageProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoImageProcessor.register(a , a )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : int = Path(a ) / "preprocessor_config.json"
__UpperCamelCase : Tuple = Path(a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(a , "w" ) , )
json.dump({"model_type": "clip"} , open(a , "w" ) )
__UpperCamelCase : Optional[Any] = CustomImageProcessor.from_pretrained(a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a )
__UpperCamelCase : Dict = AutoImageProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self :List[str] ) -> Any:
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = True
try:
AutoConfig.register("custom" , a )
AutoImageProcessor.register(a , a )
# If remote code is not set, the default is to use local
__UpperCamelCase : List[Any] = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__UpperCamelCase : Optional[int] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__UpperCamelCase : int = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 151 |
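# Minimal sketch of the registration flow the tests above cover; the two
# Custom* classes here are stand-ins for real user-defined classes.
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class CustomConfig(PretrainedConfig):
    model_type = "custom"

class CustomImageProcessor(BaseImageProcessor):
    pass

AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)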
import qiskit
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 2) -> qiskit.result.counts.Counts:
'''simple docstring'''
__UpperCamelCase : List[str] = qubits
# Using Aer's simulator
__UpperCamelCase : int = qiskit.Aer.get_backend("aer_simulator")
# Creating a Quantum Circuit acting on the q register
__UpperCamelCase : List[str] = qiskit.QuantumCircuit(_lowerCamelCase , _lowerCamelCase)
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0)
for i in range(1 , _lowerCamelCase):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , _lowerCamelCase)
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(_lowerCamelCase)) , list(range(_lowerCamelCase)))
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
__UpperCamelCase : Any = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=1_000)
return job.result().get_counts(_lowerCamelCase)
if __name__ == "__main__":
print(f"Total counts for various states are: {quantum_entanglement(3)}")
| 151 | 1 |
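# A two-qubit check of the entanglement circuit above, using the same
# qiskit.Aer / qiskit.execute API as the snippet: only the all-zeros and
# all-ones bitstrings should appear in the counts.
import qiskit

qc = qiskit.QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])
backend = qiskit.Aer.get_backend("aer_simulator")
counts = qiskit.execute(qc, backend, shots=1_000).result().get_counts(qc)
assert set(counts) <= {"00", "11"}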
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a : Any= logging.get_logger(__name__)
def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> YolosConfig:
'''simple docstring'''
__snake_case : Optional[int] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__snake_case : Optional[int] = 1_92
__snake_case : str = 7_68
__snake_case : Tuple = 12
__snake_case : List[Any] = 3
__snake_case : str = [8_00, 13_33]
__snake_case : str = False
elif yolos_name == "yolos_s_dWr":
__snake_case : Optional[Any] = 3_30
__snake_case : int = 14
__snake_case : int = 6
__snake_case : Optional[int] = 13_20
elif "yolos_s" in yolos_name:
__snake_case : Tuple = 3_84
__snake_case : List[Any] = 15_36
__snake_case : Union[str, Any] = 12
__snake_case : Union[str, Any] = 6
elif "yolos_b" in yolos_name:
__snake_case : Optional[int] = [8_00, 13_44]
__snake_case : Optional[int] = 91
__snake_case : Dict = 'huggingface/label-files'
__snake_case : Any = 'coco-detection-id2label.json'
__snake_case : List[str] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) , 'r' ) )
__snake_case : Optional[Any] = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
__snake_case : str = idalabel
__snake_case : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( UpperCAmelCase_ : dict , UpperCAmelCase_ : YolosConfig , UpperCAmelCase_ : bool = False ) -> List[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__snake_case : Dict = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
__snake_case : Dict = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Optional[Any] = in_proj_weight[: config.hidden_size, :]
__snake_case : str = in_proj_bias[: config.hidden_size]
__snake_case : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__snake_case : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__snake_case : str = in_proj_weight[-config.hidden_size :, :]
__snake_case : Optional[Any] = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> str:
'''simple docstring'''
if "backbone" in name:
__snake_case : List[Any] = name.replace('backbone' , 'vit' )
if "cls_token" in name:
__snake_case : List[Any] = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
__snake_case : List[str] = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
__snake_case : Tuple = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
__snake_case : Any = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
__snake_case : Optional[int] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
__snake_case : Any = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
__snake_case : str = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case : int = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case : Any = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case : Tuple = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
__snake_case : str = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
__snake_case : Optional[Any] = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
__snake_case : Optional[Any] = name.replace('vit.norm' , 'vit.layernorm' )
return name
def __UpperCAmelCase ( UpperCAmelCase_ : dict , UpperCAmelCase_ : YolosForObjectDetection ) -> dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__snake_case : Tuple = orig_state_dict.pop(UpperCAmelCase_ )
if "qkv" in key:
__snake_case : List[Any] = key.split('.' )
__snake_case : Optional[Any] = int(key_split[2] )
__snake_case : Any = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__snake_case : List[str] = val[:dim, :]
__snake_case : List[Any] = val[
dim : dim * 2, :
]
__snake_case : Any = val[-dim:, :]
else:
__snake_case : Dict = val[:dim]
__snake_case : int = val[dim : dim * 2]
__snake_case : Union[str, Any] = val[-dim:]
else:
__snake_case : Any = val
return orig_state_dict
def __UpperCAmelCase ( ) -> torch.Tensor:
'''simple docstring'''
__snake_case : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case : List[str] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Any = get_yolos_config(UpperCAmelCase_ )
# load original state_dict
__snake_case : Union[str, Any] = torch.load(UpperCAmelCase_ , map_location='cpu' )['model']
# load 🤗 model
__snake_case : Optional[Any] = YolosForObjectDetection(UpperCAmelCase_ )
model.eval()
__snake_case : List[Any] = convert_state_dict(UpperCAmelCase_ , UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
# Check outputs on an image, prepared by YolosImageProcessor
__snake_case : Union[str, Any] = 8_00 if yolos_name != 'yolos_ti' else 5_12
__snake_case : Optional[Any] = YolosImageProcessor(format='coco_detection' , size=UpperCAmelCase_ )
__snake_case : Any = image_processor(images=prepare_img() , return_tensors='pt' )
__snake_case : str = model(**UpperCAmelCase_ )
__snake_case , __snake_case : Any = outputs.logits, outputs.pred_boxes
__snake_case , __snake_case : Union[str, Any] = None, None
if yolos_name == "yolos_ti":
__snake_case : Optional[Any] = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
__snake_case : Union[str, Any] = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
__snake_case : List[Any] = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
__snake_case : List[str] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
__snake_case : Optional[Any] = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
__snake_case : List[Any] = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
__snake_case : Optional[Any] = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
__snake_case : Union[str, Any] = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
__snake_case : Optional[int] = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
__snake_case : int = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
__snake_case : List[str] = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
__snake_case : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(UpperCAmelCase_ , organization='hustvl' )
model.push_to_hub(UpperCAmelCase_ , organization='hustvl' )
if __name__ == "__main__":
_a : Optional[int]= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_a : Tuple= parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 172 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int
UpperCAmelCase : jnp.dtype = jnp.floataa
def _lowercase (self : Any) -> Optional[int]:
__snake_case : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : Any , _A : Any) -> str:
__snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = hidden_states.shape
__snake_case : Union[str, Any] = jax.image.resize(
_A , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
__snake_case : List[Any] = self.conv(_A)
return hidden_states
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int
UpperCAmelCase : jnp.dtype = jnp.floataa
def _lowercase (self : Optional[Any]) -> List[Any]:
__snake_case : Dict = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : int , _A : str) -> Any:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__snake_case : Union[str, Any] = self.conv(_A)
return hidden_states
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int
UpperCAmelCase : int = None
UpperCAmelCase : float = 0.0
UpperCAmelCase : bool = None
UpperCAmelCase : jnp.dtype = jnp.floataa
def _lowercase (self : List[str]) -> Dict:
__snake_case : str = self.in_channels if self.out_channels is None else self.out_channels
__snake_case : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__snake_case : str = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case : Optional[int] = nn.Dense(_A , dtype=self.dtype)
__snake_case : int = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__snake_case : str = nn.Dropout(self.dropout_prob)
__snake_case : Dict = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__snake_case : Optional[Any] = None
if use_nin_shortcut:
__snake_case : List[str] = nn.Conv(
_A , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__(self : List[Any] , _A : Union[str, Any] , _A : str , _A : int=True) -> Any:
__snake_case : List[Any] = hidden_states
__snake_case : Optional[Any] = self.norma(_A)
__snake_case : int = nn.swish(_A)
__snake_case : Optional[int] = self.conva(_A)
__snake_case : Dict = self.time_emb_proj(nn.swish(_A))
__snake_case : List[str] = jnp.expand_dims(jnp.expand_dims(_A , 1) , 1)
__snake_case : Any = hidden_states + temb
__snake_case : Tuple = self.norma(_A)
__snake_case : Dict = nn.swish(_A)
__snake_case : Union[str, Any] = self.dropout(_A , _A)
__snake_case : Union[str, Any] = self.conva(_A)
if self.conv_shortcut is not None:
__snake_case : List[Any] = self.conv_shortcut(_A)
return hidden_states + residual
| 172 | 1 |
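# Quick shape check for the nearest-neighbour upsampling used by the first
# module above: jax.image.resize doubles the spatial dims of an NHWC tensor.
import jax
import jax.numpy as jnp

x = jnp.zeros((1, 8, 8, 3))
y = jax.image.resize(x, shape=(1, 16, 16, 3), method="nearest")
assert y.shape == (1, 16, 16, 3)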
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_A = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['ViTFeatureExtractor']
_A = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
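The file above defers heavy imports until an attribute is touched, via transformers' internal _LazyModule. A rough standalone sketch of the same idea using PEP 562 module-level __getattr__ (the helper names below are mine, not the library's):

import importlib

_import_structure = {
    "configuration_vit": ["ViTConfig"],
    "modeling_vit": ["ViTModel"],
}
# Invert the table: attribute name -> submodule that defines it.
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):  # called only for attributes not found the normal way
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    submodule = importlib.import_module(f".{module_name}", __name__)  # assumes package context
    return getattr(submodule, name)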
| 205 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_A = logging.get_logger(__name__)
_A = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , UpperCAmelCase_=100 , UpperCAmelCase_=5 , UpperCAmelCase_=1 , UpperCAmelCase_=1 , UpperCAmelCase_=249 , UpperCAmelCase_=6 , UpperCAmelCase_=17 , UpperCAmelCase_=25 , UpperCAmelCase_=4 , UpperCAmelCase_=4 , UpperCAmelCase_=128 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0006 , UpperCAmelCase_=512 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1E-1_2 , UpperCAmelCase_=1 , UpperCAmelCase_=True , UpperCAmelCase_=1 , UpperCAmelCase_=50256 , UpperCAmelCase_=50256 , **UpperCAmelCase_ , ) -> List[Any]:
lowerCamelCase : int = vocab_size
lowerCamelCase : List[str] = action_weight
lowerCamelCase : List[Any] = reward_weight
lowerCamelCase : List[str] = value_weight
lowerCamelCase : Tuple = max_position_embeddings
lowerCamelCase : List[str] = block_size
lowerCamelCase : Any = action_dim
lowerCamelCase : List[Any] = observation_dim
lowerCamelCase : Any = transition_dim
lowerCamelCase : int = learning_rate
lowerCamelCase : Union[str, Any] = n_layer
lowerCamelCase : Tuple = n_head
lowerCamelCase : Any = n_embd
lowerCamelCase : Union[str, Any] = embd_pdrop
lowerCamelCase : Optional[int] = attn_pdrop
lowerCamelCase : int = resid_pdrop
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : Any = kaiming_initializer_range
lowerCamelCase : str = use_cache
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
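A usage sketch for this config (the import path is assumed; newer transformers releases keep the class under the deprecated models). The attribute_map above lets generic code read the canonical names:

from transformers import TrajectoryTransformerConfig  # path may differ by version

config = TrajectoryTransformerConfig(n_embd=128, n_layer=4, n_head=4)
print(config.hidden_size)        # 128, resolved through attribute_map to n_embd
print(config.num_hidden_layers)  # 4, resolved to n_layer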
| 205 | 1 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCAmelCase__ = '''\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'''
lowerCAmelCase__ = '''\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'''
lowerCAmelCase__ = r'''\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__(datasets.Metric ):
"""simple docstring"""
def snake_case ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = 0.0
for i, j in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
n_correct += 1.0 if math_equivalence.is_equiv(lowerCAmelCase__ , lowerCAmelCase__ ) else 0.0
lowercase__ : List[str] = n_correct / len(lowerCAmelCase__ )
return {
"accuracy": accuracy,
}
| 130 | '''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__a = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
__a = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print('\n'.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.') | 145 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def A_ ( snake_case_ : str ,snake_case_ : str ):
'''simple docstring'''
UpperCamelCase : int = list(snake_case_ )
UpperCamelCase : Optional[int] = list(snake_case_ )
UpperCamelCase : Optional[Any] = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count += 1
UpperCamelCase : Tuple = """_"""
if count > 1:
return False
else:
return "".join(snake_case_ )
def A_ ( snake_case_ : list[str] ):
'''simple docstring'''
UpperCamelCase : int = []
while True:
UpperCamelCase : Union[str, Any] = ["""$"""] * len(snake_case_ )
UpperCamelCase : Optional[int] = []
for i in range(len(snake_case_ ) ):
for j in range(i + 1 ,len(snake_case_ ) ):
UpperCamelCase : str = compare_string(binary[i] ,binary[j] )
if k is False:
UpperCamelCase : Any = """*"""
UpperCamelCase : Union[str, Any] = """*"""
temp.append("""X""" )
for i in range(len(snake_case_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(snake_case_ ) == 0:
return pi
UpperCamelCase : Optional[Any] = list(set(snake_case_ ) )
def A_ ( snake_case_ : int ,snake_case_ : Sequence[float] ):
'''simple docstring'''
UpperCamelCase : Optional[int] = []
for minterm in minterms:
UpperCamelCase : List[str] = """"""
for _ in range(snake_case_ ):
UpperCamelCase : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(snake_case_ )
return temp
def A_ ( snake_case_ : str ,snake_case_ : str ,snake_case_ : int ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = list(snake_case_ )
UpperCamelCase : Optional[int] = list(snake_case_ )
UpperCamelCase : List[Any] = 0
for i in range(len(snake_case_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def A_ ( snake_case_ : list[list[int]] ,snake_case_ : list[str] ):
'''simple docstring'''
UpperCamelCase : int = []
UpperCamelCase : Optional[Any] = [0] * len(snake_case_ )
for i in range(len(chart[0] ) ):
UpperCamelCase : Tuple = 0
UpperCamelCase : Optional[Any] = -1
for j in range(len(snake_case_ ) ):
if chart[j][i] == 1:
count += 1
UpperCamelCase : Union[str, Any] = j
if count == 1:
UpperCamelCase : Optional[int] = 1
for i in range(len(snake_case_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(snake_case_ ) ):
UpperCamelCase : Union[str, Any] = 0
temp.append(prime_implicants[i] )
while True:
UpperCamelCase : Optional[int] = 0
UpperCamelCase : Optional[int] = -1
UpperCamelCase : List[Any] = 0
for i in range(len(snake_case_ ) ):
UpperCamelCase : Tuple = chart[i].count(1 )
if count_n > max_n:
UpperCamelCase : Tuple = count_n
UpperCamelCase : Any = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(snake_case_ ) ):
UpperCamelCase : Optional[Any] = 0
def A_ ( snake_case_ : list[str] ,snake_case_ : list[str] ):
'''simple docstring'''
UpperCamelCase : List[str] = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )]
for i in range(len(snake_case_ ) ):
UpperCamelCase : Tuple = prime_implicants[i].count("""_""" )
for j in range(len(snake_case_ ) ):
if is_for_table(prime_implicants[i] ,binary[j] ,snake_case_ ):
UpperCamelCase : Optional[Any] = 1
return chart
def A_ ( ):
'''simple docstring'''
UpperCamelCase : Any = int(input("""Enter the no. of variables\n""" ) )
UpperCamelCase : Tuple = [
float(snake_case_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
UpperCamelCase : int = decimal_to_binary(snake_case_ ,snake_case_ )
UpperCamelCase : List[str] = check(snake_case_ )
print("""Prime Implicants are:""" )
print(snake_case_ )
UpperCamelCase : Dict = prime_implicant_chart(snake_case_ ,snake_case_ )
UpperCamelCase : List[Any] = selection(snake_case_ ,snake_case_ )
print("""Essential Prime Implicants are:""" )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
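A quick, non-interactive wiring check for the helpers above; integer minterms keep the bit strings clean (main() converts its input to float, which leaves '1.0'-style fragments in the strings):

binary = decimal_to_binary(3, [1, 5, 7])
print(binary)  # ['001', '101', '111']
prime_implicants = check(binary)
chart = prime_implicant_chart(prime_implicants, binary)
print(selection(chart, prime_implicants))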
| 27 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A_ ( snake_case_ : Dataset ,snake_case_ : Dict[str, str] ):
'''simple docstring'''
UpperCamelCase : List[str] = args.log_outputs
UpperCamelCase : Tuple = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
# load metric
UpperCamelCase : List[Any] = load_metric("""wer""" )
UpperCamelCase : Any = load_metric("""cer""" )
# compute metrics
UpperCamelCase : str = wer.compute(references=result["""target"""] ,predictions=result["""prediction"""] )
UpperCamelCase : Dict = cer.compute(references=result["""target"""] ,predictions=result["""prediction"""] )
# print & log results
UpperCamelCase : Optional[int] = f'WER: {wer_result}\nCER: {cer_result}'
print(snake_case_ )
with open(f'{dataset_id}_eval_results.txt' ,"""w""" ) as f:
f.write(snake_case_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCamelCase : Optional[Any] = f'log_{dataset_id}_predictions.txt'
UpperCamelCase : str = f'log_{dataset_id}_targets.txt'
with open(snake_case_ ,"""w""" ) as p, open(snake_case_ ,"""w""" ) as t:
# mapping function to write output
def write_to_file(snake_case_ : Union[str, Any] ,snake_case_ : Tuple ):
p.write(f'{i}' + """\n""" )
p.write(batch["""prediction"""] + """\n""" )
t.write(f'{i}' + """\n""" )
t.write(batch["""target"""] + """\n""" )
result.map(snake_case_ ,with_indices=snake_case_ )
def A_ ( snake_case_ : str ):
'''simple docstring'''
UpperCamelCase : Dict = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCamelCase : str = re.sub(snake_case_ ,"""""" ,text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCamelCase : List[str] = ["""\n\n""", """\n""", """ """, """ """]
for t in token_sequences_to_ignore:
UpperCamelCase : Tuple = """ """.join(text.split(snake_case_ ) )
return text
def A_ ( snake_case_ : str ):
'''simple docstring'''
# load dataset
UpperCamelCase : Union[str, Any] = load_dataset(args.dataset ,args.config ,split=args.split ,use_auth_token=snake_case_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCamelCase : Dict = feature_extractor.sampling_rate
# resample audio
UpperCamelCase : Optional[Any] = dataset.cast_column("""audio""" ,Audio(sampling_rate=snake_case_ ) )
# load eval pipeline
if args.device is None:
UpperCamelCase : int = 0 if torch.cuda.is_available() else -1
UpperCamelCase : Union[str, Any] = pipeline("""automatic-speech-recognition""" ,model=args.model_id ,device=args.device )
# map function to decode audio
def map_to_pred(snake_case_ : Union[str, Any] ):
UpperCamelCase : List[Any] = asr(
batch["""audio"""]["""array"""] ,chunk_length_s=args.chunk_length_s ,stride_length_s=args.stride_length_s )
UpperCamelCase : Union[str, Any] = prediction["""text"""]
UpperCamelCase : Optional[Any] = normalize_text(batch["""sentence"""] )
return batch
# run inference on all examples
UpperCamelCase : Any = dataset.map(snake_case_ ,remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case_ ,snake_case_ )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
__A : Optional[Any] = parser.parse_args()
main(args)
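For reference, an invocation shape this argument parser accepts (the script name, model id, and dataset id are illustrative, not prescribed by the file):

# python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#     --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs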
| 27 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
__a : Optional[Any] = tempfile.mkdtemp()
__a : List[Any] = BlipImageProcessor()
__a : Optional[Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
__a : Union[str, Any] = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
__a : str = InstructBlipProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self , **_UpperCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).tokenizer
def _lowerCamelCase ( self , **_UpperCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).image_processor
def _lowerCamelCase ( self , **_UpperCAmelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ).qformer_tokenizer
def _lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCamelCase ( self ):
__a : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__a : int = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self ):
__a : Union[str, Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
__a : str = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__a : Any = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
__a : List[Any] = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor.qformer_tokenizer , SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ):
__a : Optional[Any] = self.get_image_processor()
__a : Any = self.get_tokenizer()
__a : Tuple = self.get_qformer_tokenizer()
__a : str = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = self.prepare_image_inputs()
__a : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' )
__a : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.get_image_processor()
__a : str = self.get_tokenizer()
__a : str = self.get_qformer_tokenizer()
__a : str = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = """lower newer"""
__a : int = processor(text=SCREAMING_SNAKE_CASE__ )
__a : Any = tokenizer(SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
__a : int = qformer_tokenizer(SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : int = self.get_qformer_tokenizer()
__a : Optional[Any] = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = """lower newer"""
__a : Optional[int] = self.prepare_image_inputs()
__a : Any = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def _lowerCamelCase ( self ):
__a : Dict = self.get_image_processor()
__a : int = self.get_tokenizer()
__a : Any = self.get_qformer_tokenizer()
__a : Union[str, Any] = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__a : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a : Optional[int] = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ):
__a : Optional[int] = self.get_image_processor()
__a : Dict = self.get_tokenizer()
__a : Tuple = self.get_qformer_tokenizer()
__a : List[str] = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , qformer_tokenizer=SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = """lower newer"""
__a : Tuple = self.prepare_image_inputs()
__a : Any = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , ) | 160 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride
SCREAMING_SNAKE_CASE__ : Any = time_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
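A back-of-envelope consequence of those defaults (16x16 patches swept with stride 10 over a 1024-frame by 128-mel spectrogram); the sliding-window count below is the standard formula, assumed rather than read off this class:

frames, mel_bins, patch, f_stride, t_stride = 1024, 128, 16, 10, 10
n_time = (frames - patch) // t_stride + 1      # 101
n_freq = (mel_bins - patch) // f_stride + 1    # 12
print(n_time * n_freq)                         # 1212 patches per clip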
| 25 | 0 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
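The mutual recursion above counts combinations by always fixing the largest coin first; an equivalent bottom-up version (same count for 200 pence) avoids the deep call chains:

def solution_dp(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * pence  # ways[0] = 1: one way to make zero
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]

assert solution_dp(200) == 73682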
| 367 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def _a ( SCREAMING_SNAKE_CASE_ : Any ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict ):
__lowerCAmelCase = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
__lowerCAmelCase = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" )
__lowerCAmelCase = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" )
__lowerCAmelCase = key.replace("heads.cmd.itm_head.cls" , "itm_head" )
__lowerCAmelCase = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" )
__lowerCAmelCase = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" )
__lowerCAmelCase = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" )
__lowerCAmelCase = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" )
__lowerCAmelCase = key.replace("mm_text_projection" , "flava.text_to_mm_projection" )
__lowerCAmelCase = key.replace("mm_image_projection" , "flava.image_to_mm_projection" )
__lowerCAmelCase = key.replace("image_encoder.module" , "flava.image_model" )
__lowerCAmelCase = key.replace("text_encoder.module" , "flava.text_model" )
__lowerCAmelCase = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" )
__lowerCAmelCase = key.replace("mm_encoder.module" , "flava.multimodal_model" )
__lowerCAmelCase = key.replace("text_projection" , "flava.text_projection" )
__lowerCAmelCase = key.replace("image_projection" , "flava.image_projection" )
__lowerCAmelCase = value.float()
for key, value in codebook_state_dict.items():
__lowerCAmelCase = value
return upgrade
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int=None ):
if config_path is not None:
__lowerCAmelCase = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
__lowerCAmelCase = FlavaConfig()
__lowerCAmelCase = FlavaForPreTraining(SCREAMING_SNAKE_CASE_ ).eval()
__lowerCAmelCase = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , save_checkpoint=SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
__lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
else:
__lowerCAmelCase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
__lowerCAmelCase = upgrade_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = hf_model.state_dict()
__lowerCAmelCase = count_parameters(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = count_parameters(SCREAMING_SNAKE_CASE_ ) + count_parameters(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
UpperCamelCase__ = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 102 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv2ImageProcessor'
    tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self , A=None , A=None , **A ) -> Union[str, Any]:
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCamelCase__ , )
_SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
_SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self , A , A = None , A = None , A = None , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
_SCREAMING_SNAKE_CASE = self.image_processor(images=lowerCamelCase__ , return_tensors=lowerCamelCase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE = [text] # add batch dimension (as the image processor always adds a batch dimension)
_SCREAMING_SNAKE_CASE = features["""words"""]
_SCREAMING_SNAKE_CASE = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
# add pixel values
_SCREAMING_SNAKE_CASE = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
_SCREAMING_SNAKE_CASE = self.get_overflowing_images(lowerCamelCase__ , encoded_inputs["""overflow_to_sample_mapping"""] )
_SCREAMING_SNAKE_CASE = images
return encoded_inputs
def snake_case_( self , A , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f' {len(lowerCamelCase__ )} and {len(lowerCamelCase__ )}' )
return images_with_overflow
def snake_case_( self , *A , **A ) -> List[Any]:
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def snake_case_( self , *A , **A ) -> List[Any]:
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def snake_case_( self ) -> int:
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def snake_case_( self ) -> List[str]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCamelCase__ , )
return self.image_processor_class
@property
def snake_case_( self ) -> List[Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCamelCase__ , )
return self.image_processor
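A usage sketch for this processor (the checkpoint id is the public one, the image path is a placeholder, and the OCR step requires pytesseract); with apply_ocr left at its default, words and boxes come from the image processor:

from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")  # placeholder path
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # roughly input_ids, attention_mask, bbox, image, per model_input_names above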
| 58 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=32 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=16 , lowerCamelCase__=[32, 64, 128] , lowerCamelCase__=[1, 2, 1] , lowerCamelCase__=[2, 2, 4] , lowerCamelCase__=2 , lowerCamelCase__=2.0 , lowerCamelCase__=True , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__="gelu" , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=0.02 , lowerCamelCase__=1e-5 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=10 , lowerCamelCase__=8 , lowerCamelCase__=["stage1", "stage2"] , lowerCamelCase__=[1, 2] , ) -> int:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = embed_dim
__lowerCamelCase = hidden_sizes
__lowerCamelCase = depths
__lowerCamelCase = num_heads
__lowerCamelCase = window_size
__lowerCamelCase = mlp_ratio
__lowerCamelCase = qkv_bias
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = drop_path_rate
__lowerCamelCase = hidden_act
__lowerCamelCase = use_absolute_embeddings
__lowerCamelCase = patch_norm
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = initializer_range
__lowerCamelCase = is_training
__lowerCamelCase = scope
__lowerCamelCase = use_labels
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = encoder_stride
__lowerCamelCase = out_features
__lowerCamelCase = out_indices
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = FocalNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
__lowerCamelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCamelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__lowerCamelCase = None
__lowerCamelCase = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = FocalNetForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = self.type_sequence_label_size
__lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = config_and_inputs
__lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = FocalNetModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , embed_dim=37 , has_text_modality=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> str:
'''simple docstring'''
return
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# FocalNet has a different seq_length
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__lowerCamelCase = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = reshaped_hidden_states[0].shape
__lowerCamelCase = (
reshaped_hidden_states[0].view(lowerCamelCase__ , lowerCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
@slow
def lowercase_ ( self ) -> str:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = FocalNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
__lowerCamelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
__lowerCamelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (FocalNetBackbone,) if is_torch_available() else ()
snake_case_ = FocalNetConfig
snake_case_ = False
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = FocalNetModelTester(self )
| 90 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCamelCase : Tuple = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> List[Any]:
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
| 363 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()

    status = check_anagrams(input_a, input_b)
    print(f"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 176 | 0 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple:
"""simple docstring"""
lowerCAmelCase_ : Dict = 1.5
lowerCAmelCase_ : List[str] = int(factor * num_class_images )
lowerCAmelCase_ : str = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=__UpperCamelCase , aesthetic_weight=0.1 )
os.makedirs(f'''{class_data_dir}/images''' , exist_ok=__UpperCamelCase )
if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowerCAmelCase_ : str = client.query(text=__UpperCamelCase )
if len(__UpperCamelCase ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowerCAmelCase_ : Union[str, Any] = int(factor * num_images )
lowerCAmelCase_ : Union[str, Any] = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=__UpperCamelCase , aesthetic_weight=0.1 , )
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : str = tqdm(desc="downloading real regularization images" , total=__UpperCamelCase )
with open(f'''{class_data_dir}/caption.txt''' , "w" ) as fa, open(f'''{class_data_dir}/urls.txt''' , "w" ) as fa, open(
f'''{class_data_dir}/images.txt''' , "w" ) as fa:
while total < num_class_images:
lowerCAmelCase_ : Tuple = class_images[count]
count += 1
try:
lowerCAmelCase_ : Optional[int] = requests.get(images["url"] )
if img.status_code == 200:
lowerCAmelCase_ : Dict = Image.open(BytesIO(img.content ) )
with open(f'''{class_data_dir}/images/{total}.jpg''' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f'''{class_data_dir}/images/{total}.jpg''' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = argparse.ArgumentParser("" , add_help=__UpperCamelCase )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("--class_data_dir" , help="path to save images" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=__UpperCamelCase )
return parser.parse_args()
if __name__ == "__main__":
lowercase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
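An invocation shape for the script above (the script name, prompt, and directory are only examples):

# python retrieve.py --class_prompt "photo of a dog" \
#     --class_data_dir ./real_reg/dog --num_class_images 200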
| 241 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""CLIPFeatureExtractor"""]
lowercase__ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_clip"""] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_clip"""] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_clip"""] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 241 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Optional[Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 368 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> List[Any]:
a : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : List[Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : Union[str, Any] = -1
a : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : Tuple = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
a : List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
a : List[str] = TextStreamer(lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a : int = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : List[str] = -1
a : Optional[int] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : Union[str, Any] = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
a : Dict = tokenizer.decode(greedy_ids[0] )
a : str = TextIteratorStreamer(lowerCAmelCase__ )
a : str = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
a : Tuple = Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
a : Dict = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Tuple:
a : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : Optional[int] = -1
a : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : List[str] = model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ )
a : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
a : str = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
a : Tuple = TextStreamer(lowerCAmelCase__ , skip_prompt=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=10 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
a : Optional[int] = cs.out[:-1]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Tuple:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
a : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
a : Any = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(lowerCAmelCase__ )
a : Optional[int] = -1
a : Union[str, Any] = torch.ones((1, 5) , device=lowerCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
a : Any = TextStreamer(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
model.generate(lowerCAmelCase__ , max_new_tokens=1 , do_sample=lowerCAmelCase__ , streamer=lowerCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
a : Optional[int] = cs.out[:-1] # Remove the final "\n"
a : List[Any] = tokenizer(lowerCAmelCase__ , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __a ( self ) -> Dict:
a : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
a : List[Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(lowerCAmelCase__ )
a : str = -1
a : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase__ )
a : List[Any] = TextIteratorStreamer(lowerCAmelCase__ , timeout=0.001 )
a : List[Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
a : int = Thread(target=model.generate , kwargs=lowerCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase__ ):
a : int = ""
for new_text in streamer:
streamer_text += new_text
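# The thread-plus-iterator pattern exercised above is the documented way to stream
# generation: `generate` blocks until finished, so it runs in a worker thread while
# the main thread drains the streamer's queue. A sketch (variable names illustrative):
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   Thread(target=model.generate, kwargs={**inputs, "streamer": streamer}).start()
#   for piece in streamer:
#       print(piece, end="", flush=True)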
| 79 | 0 |
from math import isqrt, loga
def __lowerCamelCase ( snake_case__ ) -> list[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [True] * max_number
for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
            for j in range(i**2 ,__UpperCamelCase ,i ):
_SCREAMING_SNAKE_CASE = False
return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]
def __lowerCamelCase ( snake_case__ = 80_08_00 ,snake_case__ = 80_08_00 ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = degree * loga(__UpperCamelCase )
_SCREAMING_SNAKE_CASE = int(__UpperCamelCase )
_SCREAMING_SNAKE_CASE = calculate_prime_numbers(__UpperCamelCase )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = len(__UpperCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
| 306 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
__lowerCamelCase : Any = 5
__lowerCamelCase : Dict = 10
@require_sentencepiece
@require_tokenizers
class __snake_case ( lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = SpeechaTextTokenizer
lowerCAmelCase_ = False
lowerCAmelCase_ = True
def __a ( self : Tuple ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = sp.SentencePieceProcessor()
spm_model.Load(_lowercase )
SCREAMING_SNAKE_CASE__ = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_lowercase ) )]
SCREAMING_SNAKE_CASE__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ = Path(self.tmpdirname )
save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
SCREAMING_SNAKE_CASE__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """<pad>"""
SCREAMING_SNAKE_CASE__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(_lowercase ) , 10_01 )
def __a ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [2_89, 50, 14, 1_74, 3_86] , )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(_lowercase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class __snake_case ( unittest.TestCase ):
lowerCAmelCase_ = "valhalla/s2t_mustc_multilinguial_medium"
lowerCAmelCase_ = "C'est trop cool"
lowerCAmelCase_ = "Esto es genial"
@classmethod
def __a ( cls : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __a ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def __a ( self : int ):
"""simple docstring"""
self.assertIn(_lowercase , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE__ = [ES_CODE, 4, 16_01, 47, 76_47, 2]
SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """fr"""
SCREAMING_SNAKE_CASE__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , _lowercase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """fr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
SCREAMING_SNAKE_CASE__ = """es"""
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
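# A minimal sketch of the prefix-token convention the multilingual assertions above
# rely on: the tokenizer prepends the target-language id and appends EOS. The names
# below (LANG2ID, EOS) are illustrative stand-ins, not the model's real vocabulary.
LANG2ID = {"fr": 9, "es": 10}
EOS = 2

def build_multilingual_ids(token_ids: list, lang: str) -> list:
    return [LANG2ID[lang]] + list(token_ids) + [EOS]

assert build_multilingual_ids([5, 6], "es") == [10, 5, 6, 2]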
| 219 | 0 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> List[Any]:
'''simple docstring'''
def get_masked_lm_array(lowerCAmelCase_ ):
_UpperCAmelCase : Union[str, Any] = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
_UpperCAmelCase : Optional[int] = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
if "kernel" in name:
_UpperCAmelCase : List[Any] = array.transpose()
return torch.from_numpy(lowerCAmelCase_ )
def get_encoder_array(lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
_UpperCAmelCase : str = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
if "kernel" in name:
_UpperCAmelCase : List[str] = array.transpose()
return torch.from_numpy(lowerCAmelCase_ )
def get_encoder_layer_array(lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Optional[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
_UpperCAmelCase : Optional[Any] = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
if "kernel" in name:
_UpperCAmelCase : List[str] = array.transpose()
return torch.from_numpy(lowerCAmelCase_ )
def get_encoder_attention_layer_array(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : List[str] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
_UpperCAmelCase : int = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : Optional[Any] = array.reshape(lowerCAmelCase_ )
if "kernel" in name:
_UpperCAmelCase : List[str] = array.transpose()
return torch.from_numpy(lowerCAmelCase_ )
print(F'''Loading model based on config from {config_path}...''' )
_UpperCAmelCase : Any = BertConfig.from_json_file(lowerCAmelCase_ )
_UpperCAmelCase : int = BertForMaskedLM(lowerCAmelCase_ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_UpperCAmelCase : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
_UpperCAmelCase : BertSelfAttention = layer.attention.self
_UpperCAmelCase : int = get_encoder_attention_layer_array(
lowerCAmelCase_ , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
_UpperCAmelCase : Dict = get_encoder_attention_layer_array(
lowerCAmelCase_ , """_query_dense/bias""" , self_attn.query.bias.data.shape )
_UpperCAmelCase : Any = get_encoder_attention_layer_array(
lowerCAmelCase_ , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
_UpperCAmelCase : List[str] = get_encoder_attention_layer_array(
lowerCAmelCase_ , """_key_dense/bias""" , self_attn.key.bias.data.shape )
_UpperCAmelCase : Dict = get_encoder_attention_layer_array(
lowerCAmelCase_ , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
_UpperCAmelCase : Any = get_encoder_attention_layer_array(
lowerCAmelCase_ , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
_UpperCAmelCase : BertSelfOutput = layer.attention.output
_UpperCAmelCase : Optional[int] = get_encoder_attention_layer_array(
lowerCAmelCase_ , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
_UpperCAmelCase : Tuple = get_encoder_attention_layer_array(
lowerCAmelCase_ , """_output_dense/bias""" , self_output.dense.bias.data.shape )
_UpperCAmelCase : int = get_encoder_layer_array(lowerCAmelCase_ , """_attention_layer_norm/gamma""" )
_UpperCAmelCase : Any = get_encoder_layer_array(lowerCAmelCase_ , """_attention_layer_norm/beta""" )
# Intermediate
_UpperCAmelCase : BertIntermediate = layer.intermediate
_UpperCAmelCase : Union[str, Any] = get_encoder_layer_array(lowerCAmelCase_ , """_intermediate_dense/kernel""" )
_UpperCAmelCase : Dict = get_encoder_layer_array(lowerCAmelCase_ , """_intermediate_dense/bias""" )
# Output
_UpperCAmelCase : BertOutput = layer.output
_UpperCAmelCase : Any = get_encoder_layer_array(lowerCAmelCase_ , """_output_dense/kernel""" )
_UpperCAmelCase : List[str] = get_encoder_layer_array(lowerCAmelCase_ , """_output_dense/bias""" )
_UpperCAmelCase : List[Any] = get_encoder_layer_array(lowerCAmelCase_ , """_output_layer_norm/gamma""" )
_UpperCAmelCase : Any = get_encoder_layer_array(lowerCAmelCase_ , """_output_layer_norm/beta""" )
# Embeddings
_UpperCAmelCase : Optional[Any] = get_encoder_array("""_position_embedding_layer/embeddings""" )
_UpperCAmelCase : List[Any] = get_encoder_array("""_type_embedding_layer/embeddings""" )
_UpperCAmelCase : Any = get_encoder_array("""_embedding_norm_layer/gamma""" )
_UpperCAmelCase : Optional[Any] = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
_UpperCAmelCase : Dict = model.cls.predictions.transform
_UpperCAmelCase : Any = get_masked_lm_array("""dense/kernel""" )
_UpperCAmelCase : List[Any] = get_masked_lm_array("""dense/bias""" )
_UpperCAmelCase : Dict = get_masked_lm_array("""layer_norm/gamma""" )
_UpperCAmelCase : Any = get_masked_lm_array("""layer_norm/beta""" )
_UpperCAmelCase : List[Any] = get_masked_lm_array("""embedding_table""" )
# Pooling
_UpperCAmelCase : str = BertPooler(config=lowerCAmelCase_ )
_UpperCAmelCase : BertPooler = get_encoder_array("""_pooler_layer/kernel""" )
_UpperCAmelCase : BertPooler = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(lowerCAmelCase_ )
# Integration test - should load without any errors ;)
_UpperCAmelCase : str = BertForMaskedLM.from_pretrained(lowerCAmelCase_ )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
A_ : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
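# Why every "kernel" above is transposed: TF stores dense kernels as
# (in_features, out_features), while torch.nn.Linear.weight is laid out as
# (out_features, in_features). A shapes-only check (the sizes are arbitrary examples):
import numpy as np

tf_kernel = np.zeros((768, 3072))          # TF layout: (in, out)
assert tf_kernel.T.shape == (3072, 768)    # PyTorch Linear layout: (out, in)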
| 361 |
'''simple docstring'''
A_ : Optional[Any] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
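# A usage sketch for `find_executable_batch_size`, re-exported above. As documented,
# it decorates a function whose first argument is the batch size and retries with a
# smaller batch on CUDA out-of-memory; treat the exact behavior as an assumption and
# confirm against the accelerate docs.
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # build dataloaders with `batch_size` and train
#   training_loop()  # called with no args; the decorator supplies batch_size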
| 349 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def A__ ( ):
SCREAMING_SNAKE_CASE_ = [randint(-10_00, 10_00 ) for i in range(10 )]
SCREAMING_SNAKE_CASE_ = randint(-50_00, 50_00 )
return (arr, r)
__UpperCAmelCase = make_dataset()
def A__ ( __lowerCamelCase, __lowerCamelCase ):
for triplet in permutations(__lowerCamelCase, 3 ):
if sum(__lowerCamelCase ) == target:
return tuple(sorted(__lowerCamelCase ) )
return (0, 0, 0)
def A__ ( __lowerCamelCase, __lowerCamelCase ):
arr.sort()
SCREAMING_SNAKE_CASE_ = len(__lowerCamelCase )
for i in range(n - 1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def A__ ( ):
SCREAMING_SNAKE_CASE_ = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
SCREAMING_SNAKE_CASE_ = '''
triplet_sum1(*dataset)
'''
SCREAMING_SNAKE_CASE_ = '''
triplet_sum2(*dataset)
'''
SCREAMING_SNAKE_CASE_ = repeat(setup=__lowerCamelCase, stmt=__lowerCamelCase, repeat=5, number=1_00_00 )
SCREAMING_SNAKE_CASE_ = repeat(setup=__lowerCamelCase, stmt=__lowerCamelCase, repeat=5, number=1_00_00 )
return (min(__lowerCamelCase ), min(__lowerCamelCase ))
if __name__ == "__main__":
from doctest import testmod
testmod()
__UpperCAmelCase = solution_times()
print(F"""The time for naive implementation is {times[0]}.""")
print(F"""The time for optimized implementation is {times[1]}.""")
| 299 |
import math
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__lowerCamelCase )
def A__ ( __lowerCamelCase = 1 / 1_23_45 ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 3
while True:
SCREAMING_SNAKE_CASE_ = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = int(__lowerCamelCase )
total_partitions += 1
if check_partition_perfect(__lowerCamelCase ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__lowerCamelCase )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 299 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a_):
lowerCamelCase__ = '''swinv2'''
lowerCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=[2, 2, 6, 2], __a=[3, 6, 12, 24], __a=7, __a=4.0, __a=True, __a=0.0, __a=0.0, __a=0.1, __a="gelu", __a=False, __a=0.02, __a=1E-5, __a=32, **__a, ):
'''simple docstring'''
super().__init__(**lowercase_)
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : Union[str, Any] = patch_size
_lowerCAmelCase : Optional[int] = num_channels
_lowerCAmelCase : Dict = embed_dim
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : Union[str, Any] = len(lowercase_)
_lowerCAmelCase : Union[str, Any] = num_heads
_lowerCAmelCase : Union[str, Any] = window_size
_lowerCAmelCase : str = mlp_ratio
_lowerCAmelCase : str = qkv_bias
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = drop_path_rate
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[str] = use_absolute_embeddings
_lowerCAmelCase : List[Any] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Tuple = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(lowercase_) - 1))
_lowerCAmelCase : List[str] = (0, 0, 0, 0)
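# A usage sketch, assuming the class above mirrors transformers' Swinv2Config: with
# the defaults shown (embed_dim=96, depths=[2, 2, 6, 2]), the derived channel
# dimension is 96 * 2**(4 - 1) = 768.
from transformers import Swinv2Config

config = Swinv2Config()
assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1) == 768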
| 364 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case = False
try:
_snake_case = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
def __init__( self, __a = None, __a = []):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Optional[int] = choices
_lowerCAmelCase : Tuple = prompt
if sys.platform == "win32":
_lowerCAmelCase : Optional[Any] = "*"
else:
_lowerCAmelCase : Dict = "➔ "
def snake_case__ ( self, __a, __a = ""):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 32, __a)
else:
forceWrite(self.choices[index], __a)
def snake_case__ ( self, __a):
'''simple docstring'''
if index == self.position:
forceWrite(f" {self.arrow_char} ")
self.write_choice(__a)
else:
forceWrite(f" {self.choices[index]}")
reset_cursor()
def snake_case__ ( self, __a, __a = 1):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__a)
move_cursor(__a, direction.name)
self.print_choice(self.position)
@input.mark(KEYMAP["up"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.UP)
@input.mark(KEYMAP["down"])
def snake_case__ ( self):
'''simple docstring'''
self.move_direction(Direction.DOWN)
@input.mark(KEYMAP["newline"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
return self.position
@input.mark(KEYMAP["interrupt"])
def snake_case__ ( self):
'''simple docstring'''
move_cursor(len(self.choices) - self.position, "DOWN")
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = int(chr(self.current_selection))
_lowerCAmelCase : List[str] = index - self.position
if index == self.position:
return
if index < len(self.choices):
if self.position > index:
self.move_direction(Direction.UP, -movement)
elif self.position < index:
self.move_direction(Direction.DOWN, __a)
else:
return
else:
return
def snake_case__ ( self, __a = 0):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt, "\n")
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
_lowerCAmelCase : List[Any] = default_choice
for i in range(len(self.choices)):
self.print_choice(__a)
forceWrite("\n")
move_cursor(len(self.choices) - self.position, "UP")
with cursor.hide():
while True:
if in_colab:
try:
_lowerCAmelCase : str = int(builtins.input())
except ValueError:
_lowerCAmelCase : List[Any] = default_choice
else:
_lowerCAmelCase : List[str] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices) + 1):
move_cursor(1, "UP")
clear_line()
self.write_choice(__a, "\n")
return choice
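# A usage sketch, assuming this class corresponds to accelerate's interactive
# BulletMenu (its name is obfuscated here); `run` renders the choices and returns
# the selected index. Kept as comments since it needs an interactive terminal:
#   menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
#   index = menu.run(default_choice=0)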
| 300 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case_ :
def __init__( self :Tuple ,__snake_case :List[Any] ,__snake_case :Optional[Any]=13 ,__snake_case :Any=10 ,__snake_case :str=3 ,__snake_case :List[Any]=2 ,__snake_case :Optional[Any]=2 ,__snake_case :Any=True ,__snake_case :int=True ,__snake_case :Dict=32 ,__snake_case :str=5 ,__snake_case :Tuple=4 ,__snake_case :Tuple=37 ,__snake_case :List[Any]="gelu" ,__snake_case :Dict=0.1 ,__snake_case :Union[str, Any]=0.1 ,__snake_case :str=10 ,__snake_case :Union[str, Any]=0.02 ,__snake_case :Tuple="divided_space_time" ,__snake_case :Optional[Any]=None ,) -> Union[str, Any]:
a__ = parent
a__ = batch_size
a__ = image_size
a__ = num_channels
a__ = patch_size
a__ = num_frames
a__ = is_training
a__ = use_labels
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = attention_type
a__ = initializer_range
a__ = scope
a__ = num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches_per_frame + 1 CLS token
a__ = (image_size // patch_size) ** 2
a__ = (num_frames) * self.num_patches_per_frame + 1
def lowerCamelCase__( self :Tuple ) -> str:
a__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.num_labels )
a__ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
a__ = TimesformerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,)
a__ = self.num_labels
return config
def lowerCamelCase__( self :Optional[int] ,__snake_case :Optional[Any] ,__snake_case :List[str] ,__snake_case :List[Any] ) -> Union[str, Any]:
a__ = TimesformerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ,__snake_case :List[Any] ,__snake_case :Any ) -> str:
a__ = TimesformerForVideoClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case )
# verify the logits shape
a__ = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape ,__snake_case )
def lowerCamelCase__( self :List[str] ) -> List[str]:
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ = config_and_inputs
a__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : List[Any] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCAmelCase__ : str = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Optional[int] = False
def lowerCamelCase__( self :Optional[Any] ) -> int:
a__ = TimesformerModelTester(self )
a__ = ConfigTester(
self ,config_class=__snake_case ,has_text_modality=__snake_case ,hidden_size=37 )
def lowerCamelCase__( self :int ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Optional[int]=False ) -> str:
a__ = copy.deepcopy(__snake_case )
if return_labels:
if model_class in get_values(__snake_case ):
a__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__snake_case )
return inputs_dict
def lowerCamelCase__( self :Dict ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def lowerCamelCase__( self :str ) -> int:
pass
def lowerCamelCase__( self :List[Any] ) -> str:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
a__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case ,nn.Linear ) )
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__snake_case )
def lowerCamelCase__( self :Dict ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCamelCase__( self :int ) -> Union[str, Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__snake_case )
@slow
def lowerCamelCase__( self :Optional[Any] ) -> Dict:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = TimesformerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowerCamelCase__( self :int ) -> Tuple:
if not self.has_attentions:
pass
else:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = True
for model_class in self.all_model_classes:
a__ = self.model_tester.seq_length
a__ = self.model_tester.num_frames
a__ = True
a__ = False
a__ = True
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
a__ = outputs.attentions
self.assertEqual(len(__snake_case ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a__ = True
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
a__ = outputs.attentions
self.assertEqual(len(__snake_case ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
a__ = len(__snake_case )
# Check attention is always last and order is fine
a__ = True
a__ = True
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
self.assertEqual(out_len + 1 ,len(__snake_case ) )
a__ = outputs.attentions
self.assertEqual(len(__snake_case ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
def lowerCamelCase__( self :List[Any] ) -> Dict:
def check_hidden_states_output(__snake_case :List[str] ,__snake_case :str ,__snake_case :Optional[Any] ):
a__ = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(__snake_case ,__snake_case ) )
a__ = outputs.hidden_states
a__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__snake_case ) ,__snake_case )
a__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = True
check_hidden_states_output(__snake_case ,__snake_case ,__snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ = True
check_hidden_states_output(__snake_case ,__snake_case ,__snake_case )
def __lowercase ( ):
a__ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
a__ = np.load(__lowerCAmelCase )
return list(__lowerCAmelCase )
@require_torch
@require_vision
class snake_case_ (unittest.TestCase ):
@cached_property
def lowerCamelCase__( self :Optional[int] ) -> int:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__( self :Tuple ) -> Dict:
a__ = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
__snake_case )
a__ = self.default_image_processor
a__ = prepare_video()
a__ = image_processor(video[:8] ,return_tensors='pt' ).to(__snake_case )
# forward pass
with torch.no_grad():
a__ = model(**__snake_case )
# verify the logits
a__ = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape ,__snake_case )
a__ = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__snake_case ,atol=1E-4 ) )
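# The attention-shape assertions above follow from divided space-time attention:
# frames are folded into the batch dimension, so each attention map has shape
# (batch_size * num_frames, num_heads, tokens_per_frame, tokens_per_frame) with
# tokens_per_frame = num_patches_per_frame + 1 for the CLS token. Arithmetic check
# using the tester defaults (image_size=10, patch_size=2, num_frames=2):
num_patches_per_frame = (10 // 2) ** 2      # 25
seq_length = 2 * num_patches_per_frame + 1  # 51
assert seq_length // 2 + 1 == num_patches_per_frame + 1  # the tested head shape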
| 240 |
import math
def __lowercase ( __lowerCAmelCase : int ):
a__ = [True] * n
a__ = False
a__ = False
a__ = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
a__ = i * 2
while index < n:
a__ = False
a__ = index + i
a__ = [2]
for i in range(3 , __lowerCAmelCase , 2 ):
if is_prime[i]:
primes.append(__lowerCAmelCase )
return primes
def __lowercase ( __lowerCAmelCase : int = 9_9_9_9_6_6_6_6_3_3_3_3 ):
a__ = math.floor(math.sqrt(__lowerCAmelCase ) ) + 1_0_0
a__ = prime_sieve(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = primes[prime_index]
while (last_prime**2) <= limit:
a__ = primes[prime_index + 1]
a__ = last_prime**2
a__ = next_prime**2
# Get numbers divisible by lps(current)
a__ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
a__ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
a__ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
a__ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
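# A self-contained brute-force cross-check of the quantity computed above for a
# tiny limit: M(p, q) is the largest n <= limit whose prime factors are exactly
# {p, q}, and the answer sums M over all pairs. This naive version is only for
# validation and is not part of the optimized solution.
def naive_matches_sum(limit):
    best = {}
    for n in range(2, limit + 1):
        m, factors = n, set()
        for d in range(2, n + 1):
            while m % d == 0:
                factors.add(d)
                m //= d
        if len(factors) == 2:
            key = tuple(sorted(factors))
            best[key] = max(best.get(key, 0), n)
    return sum(best.values())

assert naive_matches_sum(100) == 2262  # S(100) from the problem statement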
| 240 | 1 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__snake_case = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
__snake_case = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
__snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def __lowerCAmelCase ( lowercase : str ) -> dict[str, int]:
"""simple docstring"""
snake_case : Any = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def __lowerCAmelCase ( lowercase : tuple ) -> str:
"""simple docstring"""
return x[0]
def __lowerCAmelCase ( lowercase : str ) -> str:
"""simple docstring"""
snake_case : Tuple = get_letter_count(lowercase )
snake_case : dict[int, list[str]] = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(lowercase )
snake_case : dict[int, str] = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowercase )
snake_case : Optional[int] = "".join(freq_to_letter[freq] )
snake_case : Optional[int] = list(freq_to_letter_str.items() )
freq_pairs.sort(key=lowercase , reverse=lowercase )
snake_case : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(lowercase )
def __lowerCAmelCase ( lowercase : str ) -> int:
"""simple docstring"""
snake_case : str = get_frequency_order(lowercase )
snake_case : Optional[Any] = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
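# How the scorer above behaves: it counts how many of a text's six most and six
# least frequent letters agree with the ETAOIN ordering, so genuine English tends
# to score near the maximum of 12 while random strings score low. A standalone
# restatement of the top-six comparison:
def match_top_six(freq_order):
    return sum(1 for letter in "ETAOIN" if letter in freq_order[:6])

assert match_top_six("ETAOINSHRDLU") == 6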
| 112 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
@property
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
snake_case : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : Any = self.dummy_uncond_unet
snake_case : Tuple = KarrasVeScheduler()
snake_case : int = KarrasVePipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : List[Any] = pipe(num_inference_steps=2 , generator=UpperCamelCase__ , output_type="numpy" ).images
snake_case : Dict = torch.manual_seed(0 )
snake_case : Dict = pipe(num_inference_steps=2 , generator=UpperCamelCase__ , output_type="numpy" , return_dict=UpperCamelCase__ )[0]
snake_case : Tuple = image[0, -3:, -3:, -1]
snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = "google/ncsnpp-celebahq-256"
snake_case : List[str] = UNetaDModel.from_pretrained(UpperCamelCase__ )
snake_case : Optional[Any] = KarrasVeScheduler()
snake_case : Optional[int] = KarrasVePipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
snake_case : Dict = torch.manual_seed(0 )
snake_case : Union[str, Any] = pipe(num_inference_steps=20 , generator=UpperCamelCase__ , output_type="numpy" ).images
snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case : Any = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
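# For context, the KarrasVeScheduler exercised above follows the noise schedule of
# Karras et al. (2022); below is a sketch of that schedule with the paper's default
# rho = 7 (the exact diffusers internals may differ):
def karras_sigmas(n, sigma_min=0.002, sigma_max=80.0, rho=7.0):
    return [
        (sigma_max ** (1 / rho) + i / (n - 1) * (sigma_min ** (1 / rho) - sigma_max ** (1 / rho))) ** rho
        for i in range(n)
    ]

sigmas = karras_sigmas(10)
assert sigmas[0] > sigmas[-1] > 0  # monotonically decreasing noise levels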
| 112 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[list[int]] ):
__a : Optional[int] = len(_SCREAMING_SNAKE_CASE )
    # We need to create a solution object to save the path.
__a : Optional[Any] = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )]
__a : Optional[int] = run_maze(_SCREAMING_SNAKE_CASE , 0 , 0 , _SCREAMING_SNAKE_CASE )
if solved:
print('\n'.join(str(_SCREAMING_SNAKE_CASE ) for row in solutions ) )
else:
print('No solution exists!' )
return solved
def lowerCamelCase (_SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[list[int]] ):
__a : Any = len(_SCREAMING_SNAKE_CASE )
# Final check point.
if i == j == (size - 1):
__a : Optional[int] = 1
return True
__a : Tuple = (not i < 0) and (not j < 0) # Check lower bounds
__a : str = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
__a : Optional[Any] = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
__a : List[str] = 1
# check for directions
if (
run_maze(_SCREAMING_SNAKE_CASE , i + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
or run_maze(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , j + 1 , _SCREAMING_SNAKE_CASE )
or run_maze(_SCREAMING_SNAKE_CASE , i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
or run_maze(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , j - 1 , _SCREAMING_SNAKE_CASE )
):
return True
__a : Dict = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
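# A worked example of the input convention above: 0 is an open cell, 1 is blocked,
# the rat starts at (0, 0) and must reach (size - 1, size - 1); moves are tried in
# the order down, right, up, left, and dead ends are unmarked on backtracking.
#   maze = [[0, 1, 0],
#           [0, 0, 0],
#           [1, 0, 0]]
# printed solution (1 marks the found path):
#   [1, 0, 0]
#   [1, 1, 0]
#   [0, 1, 1]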
| 27 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] ):
__a : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
F"""{test_file} instead.""" )
__a : Tuple = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
__a : List[str] = components[:-1] + [test_fn.replace('.py' , '' )]
__a : Optional[Any] = '.'.join(_SCREAMING_SNAKE_CASE )
return test_module_path
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
__a : List[str] = get_module_path(_SCREAMING_SNAKE_CASE )
__a : Dict = importlib.import_module(_SCREAMING_SNAKE_CASE )
return test_module
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
__a : List[str] = []
__a : List[str] = get_test_module(_SCREAMING_SNAKE_CASE )
for attr in dir(_SCREAMING_SNAKE_CASE ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple ):
__a : Any = []
__a : str = get_test_module(_SCREAMING_SNAKE_CASE )
for attr in dir(_SCREAMING_SNAKE_CASE ):
__a : int = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
__a : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , 'all_model_classes' , [] )
if len(_SCREAMING_SNAKE_CASE ) > 0:
test_classes.append(_SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int ):
__a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
__a : Any = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : Tuple = test_class()
if hasattr(_SCREAMING_SNAKE_CASE , 'setUp' ):
test.setUp()
__a : List[Any] = None
if hasattr(_SCREAMING_SNAKE_CASE , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
__a : List[str] = test.model_tester.__class__
return model_tester
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any] ):
__a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
__a : int = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] ):
__a : List[Any] = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__a : Any = []
for test_class in test_classes:
__a : Any = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE )
if tester_class is not None:
tester_classes.append(_SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[int] ):
__a : str = get_test_classes(_SCREAMING_SNAKE_CASE )
__a : int = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes}
return test_tester_mapping
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
__a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE )
__a : Optional[int] = {
model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_test_mapping
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
__a : Optional[Any] = get_model_classes(_SCREAMING_SNAKE_CASE )
__a : str = {
model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_to_tester_mapping
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return o
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return o.__name__
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
return [to_json(_SCREAMING_SNAKE_CASE ) for x in o]
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()}
else:
return o
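# The intended shape of the mappings built above (function names follow the
# original transformers utility this appears to derive from):
#   get_model_to_test_mapping("tests/models/bert/test_modeling_bert.py")
#     -> {BertModel: [BertModelTest, ...], BertForMaskedLM: [BertModelTest, ...]}
#   get_model_to_tester_mapping(...) maps the same model classes to their
#   *ModelTester helper classes instead.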
| 27 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = XLMTokenizer
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->int:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__: int =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCamelCase__: List[Any] =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: List[str] =["l o 123", "lo w 1456", "e r</w> 1789", ""]
lowerCamelCase__: Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
lowerCamelCase__: str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w") as fp:
fp.write(json.dumps(UpperCAmelCase_))
with open(self.merges_file , "w") as fp:
fp.write("\n".join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : str) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: List[Any] ="lower newer"
lowerCamelCase__: Union[str, Any] ="lower newer"
return input_text, output_text
    def test_full_tokenizer (self ):
        '''simple docstring'''
        tokenizer =XLMTokenizer(self.vocab_file , self.merges_file)
        text ="lower"
        bpe_tokens =["low", "er</w>"]
        tokens =tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens =tokens + ["<unk>"]
        input_bpe_tokens =[14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
    @slow
    def test_sequence_builders (self ):
        '''simple docstring'''
        tokenizer =XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text =tokenizer.encode("sequence builders" , add_special_tokens=False)
        text_a =tokenizer.encode("multi-sequence build" , add_special_tokens=False)
        encoded_sentence =tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair =tokenizer.build_inputs_with_special_tokens(text , text_a)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
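# Worked example (comment added for clarity): with the toy merges above, "lower" is split
# into characters "l o w e r</w>", then merged in rank order as "l o" -> "lo",
# "lo w" -> "low" and "e r</w>" -> "er</w>", leaving exactly the ["low", "er</w>"]
# that test_full_tokenizer asserts.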
| 350 |
from __future__ import annotations
from typing import Any
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
    def __init__(self : Tuple , row : int , column : int , default_value : float = 0) ->None:
        '''simple docstring'''
        self.row , self.column =row, column
        self.array =[[default_value for c in range(column)] for r in range(row)]
    def __str__(self : Tuple) ->str:
        '''simple docstring'''
        s =F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
        # Make string identifier
        max_element_length =0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length =max(max_element_length , len(str(obj)))
        string_format_identifier =F"""%{max_element_length}s"""
        # Make string and return
        def single_line(row_vector : list[float]) -> str:
            nonlocal string_format_identifier
            line ="["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line
        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__(self : Optional[int]) ->str:
'''simple docstring'''
return str(self)
    def validate_indicies (self : Optional[int] , loc : tuple[int, int]) ->bool:
        '''simple docstring'''
        if not (isinstance(loc , (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__(self : int , loc : tuple[int, int]) ->Any:
        '''simple docstring'''
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]
    def __setitem__(self : Optional[Any] , loc : tuple[int, int] , value : float) ->None:
        '''simple docstring'''
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] =value
    def __add__(self : Dict , another : Matrix) ->Matrix:
        '''simple docstring'''
        assert isinstance(another , Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result =Matrix(self.row , self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] =self[r, c] + another[r, c]
        return result
    def __neg__(self : str) ->Matrix:
        '''simple docstring'''
        result =Matrix(self.row , self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] =-self[r, c]
        return result
    def __sub__(self : str , another : Matrix) ->Matrix:
        '''simple docstring'''
        return self + (-another)
    def __mul__(self : List[str] , another : int | float | Matrix) ->Matrix:
        '''simple docstring'''
        if isinstance(another , (int, float)):  # Scalar multiplication
            result =Matrix(self.row , self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] =self[r, c] * another
            return result
        elif isinstance(another , Matrix):  # Matrix multiplication
            assert self.column == another.row
            result =Matrix(self.row , another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg =F"""Unsupported type given for another ({type(another)})"""
            raise TypeError(msg)
    def transpose (self : Optional[Any]) ->Matrix:
        '''simple docstring'''
        result =Matrix(self.column , self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] =self[r, c]
        return result
    def sherman_morrison (self : Optional[Any] , u : Matrix , v : Matrix) ->Any:
        '''simple docstring'''
        assert isinstance(u , Matrix) and isinstance(v , Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t =v.transpose()
        numerator_factor =(v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
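    # Comment added for clarity: the expression above is the Sherman-Morrison identity.
    # If `self` already holds A^(-1), then
    #     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # and `numerator_factor` is exactly the scalar 1 + v^T A^(-1) u.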
# Testing
if __name__ == "__main__":
    def test1 ( ) -> None:
        """simple docstring"""
        ainv =Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] =1
        print(F"""a^(-1) is {ainv}""" )
        # u, v
        u =Matrix(3 , 1 , 0 )
        u[0, 0] , u[1, 0] , u[2, 0] =1, 2, -3
        v =Matrix(3 , 1 , 0 )
        v[0, 0] , v[1, 0] , v[2, 0] =4, -2, 5
        print(F"""u is {u}""" )
        print(F"""v is {v}""" )
        print(F"""uv^T is {u * v.transpose()}""" )
        # Sherman Morrison
        print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}""" )
    def test2 ( ) -> None:
        """simple docstring"""
        import doctest
        doctest.testmod()
    test1()
| 273 | 0 |
def prime_sieve_eratosthenes ( num : int ):
    '''simple docstring'''
    if num <= 0:
        raise ValueError("""Input must be a positive integer""" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
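# Illustrative note (comment added for clarity): multiples are struck out starting at
# p * p because every smaller multiple was already marked by a smaller prime, so e.g.
# prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].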
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 252 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class Direction ( enum.Enum ):
    """simple docstring"""
    UP = 0
    DOWN = 1
def forceWrite ( content , end="" ):
    '''simple docstring'''
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor ( content , color , end="" ):
    '''simple docstring'''
    forceWrite(f'\u001b[{color}m{content}\u001b[0m' , end )
def reset_cursor ( ):
    '''simple docstring'''
    forceWrite("""\r""" )
def move_cursor ( num_lines , direction ):
    '''simple docstring'''
    forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def clear_line ( ):
    '''simple docstring'''
    forceWrite(""" """ * TERMINAL_WIDTH )
    reset_cursor()
def linebreak ( ):
    '''simple docstring'''
    reset_cursor()
    forceWrite("""-""" * TERMINAL_WIDTH )
| 252 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
def __init__( self : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any]=13 , UpperCamelCase : Any=7 , UpperCamelCase : int=True , UpperCamelCase : Any=True , UpperCamelCase : Any=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Any=99 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=5 , UpperCamelCase : str=4 , UpperCamelCase : int=37 , UpperCamelCase : Union[str, Any]="gelu" , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : List[str]=1_28 , UpperCamelCase : Tuple=32 , UpperCamelCase : List[Any]=16 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : str=0.02 , UpperCamelCase : Any=3 , UpperCamelCase : str=4 , UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : Optional[int] = seq_length
lowerCAmelCase__ : int = is_training
lowerCAmelCase__ : Optional[int] = use_input_mask
lowerCAmelCase__ : List[Any] = use_token_type_ids
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : Tuple = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : List[str] = num_hidden_layers
lowerCAmelCase__ : Tuple = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : int = max_position_embeddings
lowerCAmelCase__ : Any = type_vocab_size
lowerCAmelCase__ : Union[str, Any] = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Tuple = num_labels
lowerCAmelCase__ : Optional[Any] = num_choices
lowerCAmelCase__ : str = scope
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : int = None
if self.use_input_mask:
lowerCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : str = None
if self.use_token_type_ids:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : int = None
if self.use_labels:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : List[str] = NezhaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Dict = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
lowerCAmelCase__ : List[Any] = model(UpperCamelCase , token_type_ids=UpperCamelCase )
lowerCAmelCase__ : Any = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCAmelCase ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : List[str] = NezhaModel(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : str = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , )
lowerCAmelCase__ : Tuple = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , encoder_hidden_states=UpperCamelCase , )
lowerCAmelCase__ : Union[str, Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCAmelCase ( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = NezhaForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : List[Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = NezhaForNextSentencePrediction(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : str = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : int = NezhaForPreTraining(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : str = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , next_sentence_label=UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowerCAmelCase ( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = NezhaForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : str = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Dict , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Tuple ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.num_labels
lowerCAmelCase__ : Optional[int] = NezhaForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Any = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : int = NezhaForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Dict = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : int ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.num_choices
lowerCAmelCase__ : Tuple = NezhaForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : Optional[Any] = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase :Dict = True
def _lowerCAmelCase ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : int=False ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Dict = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
if return_labels:
if model_class in get_values(UpperCamelCase ):
lowerCAmelCase__ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase )
lowerCAmelCase__ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase )
return inputs_dict
    def setUp ( self ):
        """simple docstring"""
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def _lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase )
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def _lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCamelCase )
def _lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase )
def _lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase )
def _lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )
@slow
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Tuple = NezhaModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Any = model_class(config=UpperCamelCase )
lowerCAmelCase__ : List[Any] = self._prepare_for_class(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : int = torch.jit.trace(
UpperCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCamelCase , os.path.join(UpperCamelCase , """bert.pt""" ) )
lowerCAmelCase__ : Any = torch.jit.load(os.path.join(UpperCamelCase , """bert.pt""" ) , map_location=UpperCamelCase )
loaded(inputs_dict["""input_ids"""].to(UpperCamelCase ) , inputs_dict["""attention_mask"""].to(UpperCamelCase ) )
@require_torch
class NezhaModelIntegrationTest ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : List[str] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
lowerCAmelCase__ : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
lowerCAmelCase__ : Any = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , UpperCamelCase )
lowerCAmelCase__ : Dict = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1E-4 ) )
@slow
def _lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
lowerCAmelCase__ : Optional[int] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase__ : Optional[int] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase__ : Any = model(UpperCamelCase , attention_mask=UpperCamelCase )[0]
lowerCAmelCase__ : List[str] = torch.Size((1, 6, 2_11_28) )
self.assertEqual(output.shape , UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = torch.tensor(
[[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1E-4 ) )
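    # Note added for clarity: both @slow integration tests above pin a 3x3 slice of the
    # model output and compare it with torch.allclose(..., atol=1e-4) - the usual
    # transformers pattern for catching silent numerical regressions in ported checkpoints.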
| 212 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
    """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
    """tokenizer_config_file""": {
        """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot-3B""": 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode ( ):
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
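# Worked example of the byte<->unicode table (comment added for clarity): the 188 printable
# bytes map to themselves, while the 68 remaining bytes are shifted into U+0100 and above in
# order of appearance; e.g. the space byte 0x20 becomes "Ġ" (U+0120) and the newline byte
# 0x0A becomes "Ċ" (U+010A), which is why those markers show up in byte-level BPE vocabularies.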
def get_pairs ( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
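# e.g. get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")} - the candidate merges that
# the BPE loop in the tokenizer below ranks against self.bpe_ranks.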
class BlenderbotTokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Any , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Any="replace" , UpperCamelCase : Optional[Any]="<s>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : Optional[int]="</s>" , UpperCamelCase : str="<s>" , UpperCamelCase : int="<unk>" , UpperCamelCase : int="<pad>" , UpperCamelCase : Dict="<mask>" , UpperCamelCase : Optional[int]=False , **UpperCamelCase : Optional[Any] , ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else bos_token
lowerCAmelCase__ : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token
lowerCAmelCase__ : Dict = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else sep_token
lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else cls_token
lowerCAmelCase__ : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else unk_token
lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
super().__init__(
errors=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , )
with open(UpperCamelCase , encoding="""utf-8""" ) as vocab_handle:
lowerCAmelCase__ : Any = json.load(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowerCAmelCase__ : Dict = errors # how to handle errors in decoding
lowerCAmelCase__ : Union[str, Any] = bytes_to_unicode()
lowerCAmelCase__ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase , encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase__ : Optional[int] = merges_handle.read().split("""\n""" )[1:-1]
lowerCAmelCase__ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase__ : Any = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
lowerCAmelCase__ : Union[str, Any] = {}
lowerCAmelCase__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase__ : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.encoder )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : str ) -> Union[str, Any]:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ : Union[str, Any] = tuple(UpperCamelCase )
lowerCAmelCase__ : List[str] = get_pairs(UpperCamelCase )
if not pairs:
return token
while True:
lowerCAmelCase__ : List[str] = min(UpperCamelCase , key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ : str = bigram
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : List[str] = 0
while i < len(UpperCamelCase ):
try:
lowerCAmelCase__ : Optional[Any] = word.index(UpperCamelCase , UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ : List[str] = j
if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ : List[Any] = tuple(UpperCamelCase )
lowerCAmelCase__ : Tuple = new_word
if len(UpperCamelCase ) == 1:
break
else:
lowerCAmelCase__ : Any = get_pairs(UpperCamelCase )
lowerCAmelCase__ : Tuple = """ """.join(UpperCamelCase )
lowerCAmelCase__ : Tuple = word
return word
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = []
for token in re.findall(self.pat , UpperCamelCase ):
lowerCAmelCase__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase ).split(""" """ ) )
return bpe_tokens
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.encoder.get(UpperCamelCase , self.encoder.get(self.unk_token ) )
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = """""".join(UpperCamelCase )
lowerCAmelCase__ : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def _lowerCAmelCase ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ : Union[str, Any] = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase__ : int = os.path.join(
UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase , ensure_ascii=UpperCamelCase ) + """\n""" )
lowerCAmelCase__ : Optional[Any] = 0
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
lowerCAmelCase__ : Dict = token_index
writer.write(""" """.join(UpperCamelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
def _lowerCAmelCase ( self : Dict , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase )) + [1]
return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1]
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int]=False , **UpperCamelCase : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : int = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase ) > 0 and not text[0].isspace()):
lowerCAmelCase__ : Tuple = """ """ + text
return (text, kwargs)
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> Any:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def _lowerCAmelCase ( self : str , UpperCamelCase : "Conversation" ) -> List[int]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should contain them already.
inputs.append(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = """ """.join(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.encode(UpperCamelCase )
if len(UpperCamelCase ) > self.model_max_length:
lowerCAmelCase__ : List[str] = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
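    # Worked sketch (comment added for clarity, token ids illustrative): Blenderbot only
    # appends EOS, so build_inputs_with_special_tokens([5, 6]) -> [5, 6, self.eos_token_id],
    # unlike the RoBERTa-style "<s> ... </s>" wrapping that get_special_tokens_mask above
    # still accounts for.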
| 212 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "upernet"
    def __init__( self : Any , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''])
        elif isinstance(backbone_config , dict):
            backbone_model_type = backbone_config.get('''model_type''')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict ( self : Any):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
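# Minimal usage sketch (illustrative, assuming the upstream `transformers` defaults):
#     cfg = UperNetConfig()
#     cfg.backbone_config.model_type   # -> "resnet" (the default fallback backbone)
#     cfg.to_dict()["model_type"]      # -> "upernet"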
| 91 |
"""simple docstring"""
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray ( arr ,low ,high ):
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr ,low ,mid )
    right_low , right_high , right_sum = max_subarray(arr ,mid + 1 ,high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr ,low ,mid ,high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum ( arr ,low ,mid ,high ):
    left_sum , max_left = float("-inf" ), -1
    right_sum , max_right = float("-inf" ), -1
    summ: int | float = 0
    for i in range(mid ,low - 1 ,-1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 ,high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
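# Worked example (comment added for clarity): for the classic CLRS sample array
#     [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
# max_subarray(arr, 0, 15) returns (7, 10, 43), i.e. the slice 18 + 20 - 7 + 12.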
def time_max_subarray ( input_size ):
    arr = [randint(1 ,input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr ,0 ,input_size - 1 )
    end = time.time()
    return end - start
def plot_runtimes ( ):
    input_sizes = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print("No of Inputs\t\tTime Taken" )
    for input_size, runtime in zip(input_sizes ,runtimes ):
        print(input_size ,"\t\t" ,runtime )
    plt.plot(input_sizes ,runtimes )
    plt.xlabel("Number of Inputs" )
    plt.ylabel("Time taken in seconds" )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 268 | 0 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env ( key , default=False ):
    """simple docstring"""
    try:
        _value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(_value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env("""RUN_SLOW""", default=False)
_run_remote_tests = parse_flag_from_env("""RUN_REMOTE""", default=False)
_run_local_tests = parse_flag_from_env("""RUN_LOCAL""", default=True)
_run_packaged_tests = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
UpperCamelCase : Tuple = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
UpperCamelCase : Tuple = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
UpperCamelCase : Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
UpperCamelCase : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
UpperCamelCase : Dict = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
UpperCamelCase : Optional[Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
UpperCamelCase : Dict = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
a : Dict = unittest.skip('test requires faiss' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> List[Any]:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
a : int = unittest.skip('test requires regex' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
a : List[Any] = unittest.skip('test requires elasticsearch' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
a : Dict = unittest.skip('test requires sqlalchemy' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
a : str = unittest.skip('test requires PyTorch' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not config.TF_AVAILABLE:
a : Tuple = unittest.skip('test requires TensorFlow' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
if not config.JAX_AVAILABLE:
a : Optional[Any] = unittest.skip('test requires JAX' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
a : str = unittest.skip('test requires Pillow' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> List[str]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
def _require_spacy_model(snake_case : Optional[int] ):
try:
import spacy # noqa F401
spacy.load(snake_case )
except ImportError:
return unittest.skip('test requires spacy' )(snake_case )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(snake_case ) )(snake_case )
else:
return test_case
return _require_spacy_model
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> List[str]:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
a : List[Any] = unittest.skip('test is slow' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
a : Dict = unittest.skip('test is local' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> int:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
a : Optional[Any] = unittest.skip('test is packaged' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
a : Union[str, Any] = unittest.skip('test requires remote' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( *snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
def decorate(cls : Optional[int] ):
for name, fn in cls.__dict__.items():
if callable(snake_case ) and name.startswith('test' ):
for decorator in decorators:
a : Tuple = decorator(snake_case )
setattr(cls , snake_case , snake_case )
return cls
return decorate
class RequestWouldHangIndefinitelyError ( Exception ):
    """simple docstring"""
    pass
class OfflineSimulationMode ( Enum ):
    """simple docstring"""
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline ( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-1_6 ):
    """simple docstring"""
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        kwargs['timeout'] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError('Offline mode is enabled.' , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send' , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request' , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE' , True ):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
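# Usage sketch (illustrative): each simulation mode patches `requests` differently, e.g.
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         requests.get("https://huggingface.co")  # raises requests.ConnectionError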
@contextmanager
def set_current_working_directory_to_temp_dir ( *args , **kwargs ):
    """simple docstring"""
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def assert_arrow_memory_increases ( ):
    """simple docstring"""
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase ( ):
    """simple docstring"""
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
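# Usage sketch (illustrative): wrap any Arrow-allocating code in the context manager, e.g.
#     with assert_arrow_memory_increases():
#         table = pa.Table.from_pydict({"col": [1, 2, 3]})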
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
return deepcopy(snake_case ).integers(0 , 100 , 10 ).tolist() == deepcopy(snake_case ).integers(0 , 100 , 10 ).tolist()
def SCREAMING_SNAKE_CASE__ ( snake_case : Any ) -> Any:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(snake_case : Tuple , *snake_case : Tuple , **snake_case : str ):
try:
return func(*snake_case , **snake_case )
except HTTPError as err:
if str(snake_case ).startswith('500' ) or str(snake_case ).startswith('502' ):
pytest.xfail(str(snake_case ) )
raise err
return decorator.decorator(_wrapper , snake_case )
class _RunOutput :
    """simple docstring"""
    def __init__( self : Optional[Any] , returncode , stdout , stderr ):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream ( stream , callback ):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    """simple docstring"""
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='stdout:' ) ),
            _read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='stderr:' ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
    return result
def pytest_xdist_worker_id ( ):
    """simple docstring"""
    worker = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
    worker = re.sub(R'^gw' , '' , worker , 0 , re.M )
    return int(worker )
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
a : List[Any] = 29_500
a : int = pytest_xdist_worker_id()
return port + uniq_delta
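A usage sketch for the runner above; the launcher arguments and script name are placeholders, not part of the original snippet:

# Hypothetical usage sketch (the script path and argv are illustrative only).
import sys

def _example_run():
    port = get_torch_dist_unique_port()
    cmd = [
        sys.executable,
        "-m",
        "torch.distributed.run",
        f"--master_port={port}",
        "--nproc_per_node=2",
        "my_test_script.py",  # placeholder
    ]
    result = execute_subprocess_async(cmd, timeout=180)
    assert result.returncode == 0
    print("\n".join(result.stdout))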
| 345 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
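The `shortest_edge` branch above reproduces the classic ImageNet eval transform: scale the short side to 256/224 of the crop size, then center-crop. In isolation, the arithmetic looks like this (the image size is illustrative):

# Worked example of the shortest-edge resize rule above.
height, width = 480, 640                            # input image size (illustrative)
shortest_edge = 224                                 # size['shortest_edge']
target_short = int((256 / 224) * shortest_edge)     # 256, as computed in the resize method
scale = target_short / min(height, width)
print(round(height * scale), round(width * scale))  # 256 341, then a 224x224 center crop follows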
| 345 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors of equal length."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    # NOTE: the original function names were lost in extraction; `similarity_search`
    # and `cosine_similarity` below are reconstructions.
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the closest vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
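A small usage sketch of the helpers above (using the reconstructed names):

# Find the nearest stored vector for one query point.
import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[0.9, 1.1]])
print(similarity_search(dataset, queries))  # [[[1.0, 1.0], 0.1414...]]
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.7071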
| 60 |
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of `img` with its contrast adjusted by `level`."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('image_data/lena_high_contrast.png', format='png')
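For intuition, the factor for level 170 works out to roughly 4.85, so pixel values are pushed hard away from mid-grey:

# Worked example of the contrast factor (values rounded).
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))  # ~4.85
print(int(128 + factor * (150 - 128)))  # a pixel of 150 maps to ~234
print(int(128 + factor * (100 - 128)))  # a pixel of 100 maps to ~-7 (PIL clips stored values to 0..255)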
| 79 | 0 |
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number: 3, 5, 9, 13, 17, 25, ..."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # after [3, 5], new values come in blocks of 3, 6, 12, ... entries,
        # each block adding 2 ** (block + 1) to earlier list entries
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
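A quick sanity check of `proth` against the known start of the sequence:

# The first ten Proth numbers, verified against the OEIS-listed values.
expected = [3, 5, 9, 13, 17, 25, 33, 41, 49, 57]
assert [proth(n) for n in range(1, 11)] == expected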
| 278 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
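The `_LazyModule` indirection above delays importing the heavy modeling code until an attribute is actually accessed. A minimal, independent sketch of the idea (not the actual transformers implementation):

# Minimal sketch of lazy attribute loading, assuming a {submodule: [names]} map.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the owning submodule only on first attribute access
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")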
| 278 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
__snake_case = {
'''gpt2''': 10_24,
'''gpt2-medium''': 10_24,
'''gpt2-large''': 10_24,
'''gpt2-xl''': 10_24,
'''distilgpt2''': 10_24,
}
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : int = VOCAB_FILES_NAMES
__lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""]
__lowerCamelCase : Union[str, Any] = GPTaTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__="<|endoftext|>" , snake_case__=False , **snake_case__ , ) -> Any:
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , )
UpperCAmelCase : Tuple =kwargs.pop('''add_bos_token''' , snake_case__ )
UpperCAmelCase : Optional[int] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , snake_case__ ) != add_prefix_space:
UpperCAmelCase : Tuple =getattr(snake_case__ , pre_tok_state.pop('''type''' ) )
UpperCAmelCase : Any =add_prefix_space
UpperCAmelCase : List[Any] =pre_tok_class(**snake_case__ )
UpperCAmelCase : Optional[Any] =add_prefix_space
def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase : List[str] =kwargs.get('''is_split_into_words''' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =kwargs.get('''is_split_into_words''' , snake_case__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase : Any =self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCAmelCase__ ( self , snake_case__ ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case__ , add_special_tokens=snake_case__ ) + [self.eos_token_id] )
if len(snake_case__ ) > self.model_max_length:
UpperCAmelCase : Dict =input_ids[-self.model_max_length :]
return input_ids
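For reference, the fast GPT-2 tokenizer is normally used through the public API; a short sketch (fetches the vocab files on first run):

# Usage sketch via the public transformers API.
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2")
enc = tok("Hello world")
print(enc["input_ids"])              # [15496, 995]
print(tok.decode(enc["input_ids"]))  # "Hello world"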
| 348 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple =self.dummy_uncond_unet
UpperCAmelCase : Optional[int] =KarrasVeScheduler()
UpperCAmelCase : List[Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : List[str] =torch.manual_seed(0 )
UpperCAmelCase : List[str] =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' ).images
UpperCAmelCase : str =torch.manual_seed(0 )
UpperCAmelCase : str =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' , return_dict=snake_case__ )[0]
UpperCAmelCase : Any =image[0, -3:, -3:, -1]
UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : int =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple ='''google/ncsnpp-celebahq-256'''
UpperCAmelCase : int =UNetaDModel.from_pretrained(snake_case__ )
UpperCAmelCase : Dict =KarrasVeScheduler()
UpperCAmelCase : Union[str, Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Any =torch.manual_seed(0 )
UpperCAmelCase : Tuple =pipe(num_inference_steps=20 , generator=snake_case__ , output_type='''numpy''' ).images
UpperCAmelCase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Tuple =np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
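Outside the test harness, the same pipeline is driven through the public diffusers API; a sketch, assuming network access for the pretrained checkpoint:

# Usage sketch mirroring the slow test above (downloads the pretrained UNet).
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]
image.save("karras_ve_sample.png")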
| 348 | 1 |
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    # NOTE: the original function name was lost in extraction; `text_justification`
    # is a reconstruction.
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
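A quick check of the classic 16-column case, with repr used to make the padding visible:

# Example: the standard 16-column justification case.
sample = "This is an example of text justification."
for row in text_justification(sample, 16):
    print(repr(row))
# 'This    is    an'
# 'example  of text'
# 'justification.  '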
| 358 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to SortishSamler or not.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
snake_case__ : Optional[str] = field(
default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 32 | 0 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Optional[Any] = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class _snake_case ( snake_case ):
def __init__( self , _a=None , _a=None , *_a , **_a ):
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
__magic_name__ : List[Any] = self.model.config
else:
__magic_name__ : Optional[Any] = config
__magic_name__ : str = data_args
__magic_name__ : int = self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
" padding.." )
if self.args.label_smoothing == 0:
__magic_name__ : Tuple = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__magic_name__ : Optional[Any] = label_smoothed_nll_loss
def SCREAMING_SNAKE_CASE ( self , _a ):
if self.optimizer is None:
__magic_name__ : Optional[Any] = ["bias", "LayerNorm.weight"]
__magic_name__ : Optional[int] = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
__magic_name__ : List[str] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__magic_name__ : Any = Adafactor
__magic_name__ : Tuple = {"scale_parameter": False, "relative_step": False}
else:
__magic_name__ : Any = AdamW
__magic_name__ : str = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
__magic_name__ : Optional[Any] = self.args.learning_rate
if self.sharded_ddp:
__magic_name__ : Dict = OSS(
params=_a , optim=_a , **_a , )
else:
__magic_name__ : List[str] = optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
__magic_name__ : str = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Dict = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__magic_name__ : int = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__magic_name__ : Optional[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__magic_name__ : Optional[Any] = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def SCREAMING_SNAKE_CASE ( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__magic_name__ : Optional[int] = model(**_a , use_cache=_a )[0]
__magic_name__ : str = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
__magic_name__ , __magic_name__ : Union[str, Any] = model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
__magic_name__ : Any = model(**_a , use_cache=_a )[0]
__magic_name__ : str = torch.nn.functional.log_softmax(_a , dim=-1 )
__magic_name__ , __magic_name__ : Tuple = self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
__magic_name__ : Optional[int] = inputs.pop("labels" )
__magic_name__ , __magic_name__ : Optional[int] = self._compute_loss(_a , _a , _a )
return loss
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a = None , ):
__magic_name__ : List[str] = self._prepare_inputs(_a )
__magic_name__ : List[str] = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__magic_name__ : Optional[int] = self.model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__magic_name__ : Dict = self._pad_tensors_to_max_len(_a , gen_kwargs["max_length"] )
__magic_name__ : List[str] = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
__magic_name__ , __magic_name__ : Optional[Any] = self._compute_loss(_a , _a , _a )
__magic_name__ : List[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__magic_name__ : Dict = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__magic_name__ : int = self._pad_tensors_to_max_len(_a , gen_kwargs["max_length"] )
return (loss, logits, labels)
def SCREAMING_SNAKE_CASE ( self , _a , _a ):
# If PAD token is not defined at least EOS token has to be defined
__magic_name__ : int = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f''' padded to `max_length`={max_length}''' )
__magic_name__ : int = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__magic_name__ : Optional[int] = tensor
return padded_tensor
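The optimizer construction above uses the standard two-group pattern that exempts biases and LayerNorm weights from weight decay; stripped of the trainer plumbing, the pattern is:

# Standalone sketch of the no-decay parameter grouping used above.
import torch

def build_param_groups(model: torch.nn.Module, weight_decay: float):
    no_decay = ["bias", "LayerNorm.weight"]
    return [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]

# optimizer = torch.optim.AdamW(build_param_groups(model, 0.01), lr=3e-5)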
| 281 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
"vocab_file": "vocab.txt",
"merges_file": "bpe.codes",
}
snake_case : Dict = {
"vocab_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
},
"merges_file": {
"vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
"vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
},
}
snake_case : Union[str, Any] = {
"vinai/phobert-base": 256,
"vinai/phobert-large": 256,
}
def lowerCAmelCase_ ( _snake_case : str ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : List[str] = set()
__magic_name__ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__magic_name__ : int = char
__magic_name__ : List[str] = set(_snake_case )
return pairs
class _snake_case ( snake_case ):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , **_a , ):
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , **_a , )
__magic_name__ : Dict = vocab_file
__magic_name__ : Tuple = merges_file
__magic_name__ : List[Any] = {}
__magic_name__ : List[Any] = 0
__magic_name__ : Tuple = 1
__magic_name__ : int = 2
__magic_name__ : Union[str, Any] = 3
self.add_from_file(_a )
__magic_name__ : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(_a , encoding="utf-8" ) as merges_handle:
__magic_name__ : List[str] = merges_handle.read().split("\n" )[:-1]
__magic_name__ : Union[str, Any] = [tuple(merge.split()[:-1] ) for merge in merges]
__magic_name__ : Union[str, Any] = dict(zip(_a , range(len(_a ) ) ) )
__magic_name__ : Optional[int] = {}
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : Optional[Any] = [self.cls_token_id]
__magic_name__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Optional[Any] = [self.sep_token_id]
__magic_name__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , _a ):
if token in self.cache:
return self.cache[token]
__magic_name__ : List[Any] = tuple(_a )
__magic_name__ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
__magic_name__ : Any = get_pairs(_a )
if not pairs:
return token
while True:
__magic_name__ : str = min(_a , key=lambda _a : self.bpe_ranks.get(_a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ , __magic_name__ : List[str] = bigram
__magic_name__ : List[str] = []
__magic_name__ : List[str] = 0
while i < len(_a ):
try:
__magic_name__ : Any = word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : Union[str, Any] = tuple(_a )
__magic_name__ : Optional[int] = new_word
if len(_a ) == 1:
break
else:
__magic_name__ : List[Any] = get_pairs(_a )
__magic_name__ : Optional[int] = "@@ ".join(_a )
__magic_name__ : Tuple = word[:-4]
__magic_name__ : str = word
return word
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Optional[Any] = []
__magic_name__ : Dict = re.findall(r"\S+\n?" , _a )
for token in words:
split_tokens.extend(list(self.bpe(_a ).split(" " ) ) )
return split_tokens
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _a ):
return self.decoder.get(_a , self.unk_token )
def SCREAMING_SNAKE_CASE ( self , _a ):
__magic_name__ : Tuple = " ".join(_a ).replace("@@ " , "" ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : Optional[int] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__magic_name__ : Union[str, Any] = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
if os.path.abspath(self.merges_file ) != os.path.abspath(_a ):
copyfile(self.merges_file , _a )
return out_vocab_file, out_merge_file
def SCREAMING_SNAKE_CASE ( self , _a ):
if isinstance(_a , _a ):
try:
with open(_a , "r" , encoding="utf-8" ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
__magic_name__ : List[Any] = f.readlines()
for lineTmp in lines:
__magic_name__ : Optional[Any] = lineTmp.strip()
__magic_name__ : Union[str, Any] = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
__magic_name__ : Optional[int] = line[:idx]
__magic_name__ : Dict = len(self.encoder )
| 281 | 1 |
from collections import namedtuple
UpperCamelCase = namedtuple('from_to', 'from_ to')
UpperCamelCase = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00_454, 264.172),
'cubicyard': from_to(0.76_455, 1.30_795),
'cubicfoot': from_to(0.028, 35.3_147),
'cup': from_to(0.000_236_588, 4_226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    # NOTE: the original function name was lost in extraction; `volume_conversion`
    # is a reconstruction.
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
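Two sample conversions with the table above; every conversion pivots through cubic metres:

# 4 cubic metres in litres, and 2 litres in gallons.
print(volume_conversion(4, "cubicmeter", "litre"))  # 4000.0
print(volume_conversion(2, "litre", "gallon"))      # 2 * 0.001 * 264.172 = 0.528344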
| 364 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
UpperCamelCase = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
UpperCamelCase = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
UpperCamelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
UpperCamelCase = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
UpperCamelCase = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case__ = DPRContextEncoderTokenizer
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case__ = DPRQuestionEncoderTokenizer
UpperCamelCase = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
UpperCamelCase = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
UpperCamelCase = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Return:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(UpperCamelCase__ )
class __lowerCamelCase :
"""simple docstring"""
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Union[bool, str] = False , SCREAMING_SNAKE_CASE__ : Union[bool, str] = False , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
elif titles is None or texts is None:
lowerCAmelCase__ = titles if texts is None else texts
return super().__call__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = titles if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [titles]
lowerCAmelCase__ = texts if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [texts]
lowerCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = questions if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [questions] * n_passages
assert len(SCREAMING_SNAKE_CASE__ ) == len(
SCREAMING_SNAKE_CASE__ ), f'There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE__ )} titles and {len(SCREAMING_SNAKE_CASE__ )} texts.'
lowerCAmelCase__ = super().__call__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )["input_ids"]
lowerCAmelCase__ = super().__call__(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )["input_ids"]
lowerCAmelCase__ = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
]
}
if return_attention_mask is not False:
lowerCAmelCase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCAmelCase__ = attention_mask
return self.pad(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : BatchEncoding , SCREAMING_SNAKE_CASE__ : DPRReaderOutput , SCREAMING_SNAKE_CASE__ : int = 16 , SCREAMING_SNAKE_CASE__ : int = 64 , SCREAMING_SNAKE_CASE__ : int = 4 , ) -> List[DPRSpanPrediction]:
lowerCAmelCase__ = reader_input["input_ids"]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = reader_output[:3]
lowerCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = sorted(range(SCREAMING_SNAKE_CASE__ ) , reverse=SCREAMING_SNAKE_CASE__ , key=relevance_logits.__getitem__ )
lowerCAmelCase__ = []
for doc_id in sorted_docs:
lowerCAmelCase__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase__ = sequence_ids.index(self.pad_token_id )
else:
lowerCAmelCase__ = len(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE__ , top_spans=SCREAMING_SNAKE_CASE__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE__ , start_index=SCREAMING_SNAKE_CASE__ , end_index=SCREAMING_SNAKE_CASE__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(SCREAMING_SNAKE_CASE__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ) -> List[DPRSpanPrediction]:
lowerCAmelCase__ = []
for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCAmelCase__ = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : x[1] , reverse=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
lowerCAmelCase__ = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(SCREAMING_SNAKE_CASE__ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__ )
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = READER_PRETRAINED_VOCAB_FILES_MAP
snake_case__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = READER_PRETRAINED_INIT_CONFIGURATION
snake_case__ = ["input_ids", "attention_mask"]
snake_case__ = DPRReaderTokenizer
| 221 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __lowerCAmelCase ( TensorFormatter[Mapping, """torch.Tensor""", Mapping]):
def __init__( self , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
super().__init__(features=lowerCAmelCase__ )
a__ : List[str] =torch_tensor_kwargs
import torch # noqa import torch at initialization
def _lowercase ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
import torch
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and column:
if all(
isinstance(lowerCAmelCase__ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(lowerCAmelCase__ )
return column
def _lowercase ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
import torch
if isinstance(lowerCAmelCase__ , (str, bytes, type(lowerCAmelCase__ )) ):
return value
elif isinstance(lowerCAmelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
a__ : Optional[Any] ={}
if isinstance(lowerCAmelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
a__ : Dict ={"dtype": torch.intaa}
elif isinstance(lowerCAmelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
a__ : Dict ={"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCAmelCase__ , PIL.Image.Image ):
a__ : Optional[int] =np.asarray(lowerCAmelCase__ )
return torch.tensor(lowerCAmelCase__ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _lowercase ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(lowerCAmelCase__ , "__array__" ) and not isinstance(lowerCAmelCase__ , torch.Tensor ):
a__ : Dict =data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCAmelCase__ , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCAmelCase__ ) for substruct in data_struct] )
elif isinstance(lowerCAmelCase__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(lowerCAmelCase__ ) for substruct in data_struct] )
return self._tensorize(lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , lowerCAmelCase__ , map_list=lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ ) -> Mapping:
'''simple docstring'''
a__ : List[Any] =self.numpy_arrow_extractor().extract_row(lowerCAmelCase__ )
a__ : Tuple =self.python_features_decoder.decode_row(lowerCAmelCase__ )
return self.recursive_tensorize(lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ ) -> "torch.Tensor":
'''simple docstring'''
a__ : Tuple =self.numpy_arrow_extractor().extract_column(lowerCAmelCase__ )
a__ : Any =self.python_features_decoder.decode_column(lowerCAmelCase__ , pa_table.column_names[0] )
a__ : Optional[int] =self.recursive_tensorize(lowerCAmelCase__ )
a__ : int =self._consolidate(lowerCAmelCase__ )
return column
def _lowercase ( self , lowerCAmelCase__ ) -> Mapping:
'''simple docstring'''
a__ : str =self.numpy_arrow_extractor().extract_batch(lowerCAmelCase__ )
a__ : List[str] =self.python_features_decoder.decode_batch(lowerCAmelCase__ )
a__ : Optional[Any] =self.recursive_tensorize(lowerCAmelCase__ )
for column_name in batch:
a__ : List[Any] =self._consolidate(batch[column_name] )
return batch
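This formatter is normally reached through the public `datasets` formatting API rather than constructed directly; a small usage sketch (assumes the `datasets` and `torch` packages are installed):

# Usage sketch: the formatter above is what backs with_format("torch").
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]}).with_format("torch")
print(ds[0]["x"])  # tensor([1, 2])
print(ds["y"])     # tensor([0, 1])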
| 95 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Any = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Union[str, Any] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 95 | 1 |
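The init file above follows the transformers lazy-import convention: public names live in a plain _import_structure dict and submodules are only imported on first attribute access. A stripped-down sketch of the same mechanism (this class is a simplification, not the real _LazyModule):
import importlib
import types
class LazyModule(types.ModuleType):
    # Resolve attributes on first access instead of importing everything up front.
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map attribute name -> submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only fires once per name
        return value
# Wiring it up requires a real package layout, e.g.
# sys.modules[__name__] = LazyModule(__name__, {"tokenization_convbert": ["ConvBertTokenizer"]})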
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : str = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_2 import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
Blip2Config,
Blip2QFormerConfig,
Blip2VisionConfig,
)
from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_2 import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Blip2ForConditionalGeneration,
Blip2Model,
Blip2PreTrainedModel,
Blip2QFormerModel,
Blip2VisionModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 |
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray ( rgb ) -> np.ndarray:
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def gray_to_binary ( gray ) -> np.ndarray:
    return (gray > 127) & (gray <= 255)
def dilation ( image , kernel ) -> np.ndarray:
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image (offset taken from the reference implementation)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("""RGB""")
    pil_img.save("""result_dilation.png""")
| 140 | 0 |
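To see what the dilation above computes, a tiny binary example helps; scipy's binary_dilation serves as an independent reference implementation for cross-checking (scipy is an extra dependency here):
import numpy as np
from scipy.ndimage import binary_dilation
image = np.zeros((5, 5), dtype=int)
image[2, 2] = 1                       # a single foreground pixel
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
# The lone pixel grows to the shape of the structuring element:
# its own position plus the four 4-connected neighbours.
print(binary_dilation(image, structure=cross).astype(int))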
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class snake_case_ ( __A ):
__A : str = "cvt"
def __init__( self : Dict , lowercase_ : int=3 , lowercase_ : Optional[Any]=[7, 3, 3] , lowercase_ : Dict=[4, 2, 2] , lowercase_ : List[Any]=[2, 1, 1] , lowercase_ : Union[str, Any]=[64, 1_92, 3_84] , lowercase_ : Union[str, Any]=[1, 3, 6] , lowercase_ : Optional[Any]=[1, 2, 10] , lowercase_ : Tuple=[4.0, 4.0, 4.0] , lowercase_ : Tuple=[0.0, 0.0, 0.0] , lowercase_ : str=[0.0, 0.0, 0.0] , lowercase_ : Union[str, Any]=[0.0, 0.0, 0.1] , lowercase_ : int=[True, True, True] , lowercase_ : str=[False, False, True] , lowercase_ : Dict=["dw_bn", "dw_bn", "dw_bn"] , lowercase_ : Any=[3, 3, 3] , lowercase_ : Dict=[1, 1, 1] , lowercase_ : Any=[2, 2, 2] , lowercase_ : List[str]=[1, 1, 1] , lowercase_ : Any=[1, 1, 1] , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[int]=1E-12 , **lowercase_ : Any , ) -> List[Any]:
super().__init__(**lowercase_ )
lowercase__ : Tuple = num_channels
lowercase__ : Optional[Any] = patch_sizes
lowercase__ : Optional[int] = patch_stride
lowercase__ : Tuple = patch_padding
lowercase__ : Dict = embed_dim
lowercase__ : str = num_heads
lowercase__ : Dict = depth
lowercase__ : List[str] = mlp_ratio
lowercase__ : Any = attention_drop_rate
lowercase__ : Union[str, Any] = drop_rate
lowercase__ : Optional[int] = drop_path_rate
lowercase__ : Any = qkv_bias
lowercase__ : Optional[Any] = cls_token
lowercase__ : Optional[Any] = qkv_projection_method
lowercase__ : Optional[Any] = kernel_qkv
lowercase__ : Optional[int] = padding_kv
lowercase__ : Union[str, Any] = stride_kv
lowercase__ : str = padding_q
lowercase__ : Optional[Any] = stride_q
lowercase__ : Tuple = initializer_range
lowercase__ : str = layer_norm_eps
| 87 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case_ ( unittest.TestCase ):
@require_torch
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
lowercase__ : Union[str, Any] = pipeline(
task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
lowercase__ : List[str] = load_dataset("ashraq/esc50" )
lowercase__ : List[Any] = dataset["train"]["audio"][-1]["array"]
lowercase__ : Dict = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(lowercase_ ) , [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}] , )
@unittest.skip("No models are available in TF" )
def __UpperCamelCase ( self : str ) -> Optional[int]:
pass
@slow
@require_torch
def __UpperCamelCase ( self : List[str] ) -> int:
lowercase__ : Tuple = pipeline(
task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
# This is an audio of a dog
lowercase__ : Union[str, Any] = load_dataset("ashraq/esc50" )
lowercase__ : Tuple = dataset["train"]["audio"][-1]["array"]
lowercase__ : List[Any] = audio_classifier(lowercase_ , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(lowercase_ ) , [
{"score": 0.9_99, "label": "Sound of a dog"},
{"score": 0.0_01, "label": "Sound of vaccum cleaner"},
] , )
lowercase__ : int = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[
{"score": 0.9_99, "label": "Sound of a dog"},
{"score": 0.0_01, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
lowercase__ : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
self.assertEqual(
nested_simplify(lowercase_ ) , [
[
{"score": 0.9_99, "label": "Sound of a dog"},
{"score": 0.0_01, "label": "Sound of vaccum cleaner"},
],
]
* 5 , )
@unittest.skip("No models are available in TF" )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
pass
| 87 | 1 |
def UpperCamelCase ( input_string : str ):
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of the previous furthest-ending palindromic
    # substring
    l , r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
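A worked example of the interleaving trick used above: "abacab" becomes "a|b|a|c|a|b", palindrome lengths are tracked per center, and the widest span maps back to "bacab". A quick O(n^2) brute force that any Manacher implementation should agree with:
def longest_palindrome_bruteforce(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i + 1, len(s) + 1):
            candidate = s[i:j]
            if candidate == candidate[::-1] and len(candidate) > len(best):
                best = candidate
    return best
print(longest_palindrome_bruteforce("abacab"))  # bacab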
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__lowerCamelCase = ["""text""", """image""", """audio"""]
def UpperCamelCase ( __lowerCamelCase : List[str] ):
snake_case : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
inputs.append(create_inputs(__lowerCamelCase ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def UpperCamelCase ( __lowerCamelCase : List ):
snake_case : List[str] = []
for output in outputs:
if isinstance(__lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(__lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(__lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class UpperCAmelCase :
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[str]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
snake_case : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , snake_case__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
snake_case : str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = create_inputs(self.tool.inputs )
snake_case : Dict = self.tool(*snake_case__ )
# There is a single output
if len(self.tool.outputs ) == 1:
snake_case : List[Any] = [outputs]
self.assertListEqual(output_types(snake_case__ ) , self.tool.outputs )
def _SCREAMING_SNAKE_CASE (self : List[str] ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def _SCREAMING_SNAKE_CASE (self : int ) -> Union[str, Any]:
'''simple docstring'''
snake_case : str = create_inputs(self.tool.inputs )
snake_case : int = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : Optional[Any] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
for output, output_type in zip(snake_case__ , self.tool.outputs ):
snake_case : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(snake_case__ , snake_case__ ) )
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case : List[Any] = create_inputs(self.tool.inputs )
snake_case : str = []
for _input, input_type in zip(snake_case__ , self.tool.inputs ):
if isinstance(snake_case__ , snake_case__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
snake_case : Optional[int] = self.tool(*snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
snake_case : List[str] = [outputs]
self.assertEqual(len(snake_case__ ) , len(self.tool.outputs ) )
| 10 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCAmelCase__ ( __A ):
a__ : Any = """roberta"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_02_65 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : str=12 , SCREAMING_SNAKE_CASE__ : int=30_72 , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : int=5_12 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : Dict=1e-12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE__ : List[str]=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : str="absolute" , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Any , ) -> str:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = hidden_act
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = position_embedding_type
__lowerCamelCase = use_cache
__lowerCamelCase = classifier_dropout
class lowerCAmelCase__ ( __A ):
@property
def __A ( self : Dict ) -> Any:
if self.task == "multiple-choice":
dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 270 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_UpperCAmelCase = """sshleifer/bart-tiny-random"""
_UpperCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return AutoConfig.from_pretrained(lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , *A_ : Tuple = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , *A_ : int = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=lowercase )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , *A_ : str = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=lowercase )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ , *A_ : List[str] = create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def lowerCAmelCase_ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase ):
create_student_by_copying_alternating_layers(lowercase , tempfile.mkdtemp() , e=lowercase , d=lowercase )
| 140 | 0 |
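The tests above exercise student models built by copying a subset of teacher layers. One reasonable selection rule, evenly spaced indices that keep the first and last layer, is sketched below; this is a hedged guess at the idea, not the exact mapping make_student uses:
def pick_evenly_spaced_layers(n_student: int, n_teacher: int) -> list[int]:
    # Degenerate case: a one-layer student copies the teacher's first layer.
    if n_student == 1:
        return [0]
    step = (n_teacher - 1) / (n_student - 1)
    return sorted({round(i * step) for i in range(n_student)})
print(pick_evenly_spaced_layers(3, 12))  # [0, 6, 11]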
'''simple docstring'''
def A_( A : list[int]):
    if not A: # Makes sure that the list is not empty
        raise ValueError('List is empty')
    average = sum(A) / len(A) # Calculate the average
    return sum(abs(x - average) for x in A) / len(A)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
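A worked check for the mean absolute deviation above: for [1, 2, 3, 4] the mean is 2.5, the absolute deviations are 1.5, 0.5, 0.5, 1.5, and their mean is 1.0.
nums = [1, 2, 3, 4]
average = sum(nums) / len(nums)                         # 2.5
print(sum(abs(x - average) for x in nums) / len(nums))  # 1.0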
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
lowerCAmelCase : List[Any] = [8, 5, 9, 7]
lowerCAmelCase : str = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowerCAmelCase : Tuple = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class SCREAMING_SNAKE_CASE__ :
def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table , )-> None:
    '''simple docstring'''
    self.__claim_vector = claim_vector
    self.__allocated_resources_table = allocated_resources_table
    self.__maximum_claim_table = maximum_claim_table
def __processes_resource_summation( self )-> list[int]:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __available_resources( self )-> list[int]:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __need( self )-> list[list[int]]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __need_index_manager( self )-> dict[int, list[int]]:
'''simple docstring'''
return {self.__need().index(i ): i for i in self.__need()}
def main( self , **kwargs )-> None:
    '''simple docstring'''
    need_list = self.__need()
    alloc_resources_table = self.__allocated_resources_table
    available_resources = self.__available_resources()
    need_index_manager = self.__need_index_manager()
    for kw, val in kwargs.items():
        if kw and val is True:
            self.__pretty_data()
    print('_' * 50 + '\n' )
    while need_list:
        safe = False
        for each_need in need_list:
            execution = True
            for index, need in enumerate(each_need ):
                if need > available_resources[index]:
                    execution = False
                    break
            if execution:
                safe = True
                # get the original index of the process from ind_ctrl db
                for original_need_index, need_clone in need_index_manager.items():
                    if each_need == need_clone:
                        process_number = original_need_index
                print(F'''Process {process_number + 1} is executing.''' )
                # remove the process run from stack
                need_list.remove(each_need )
                # update available/freed resources stack
                available_resources = np.array(available_resources ) + np.array(
                    alloc_resources_table[process_number] )
                print(
                    'Updated available resource stack for processes: '
                    + ' '.join([str(x ) for x in available_resources] ) )
                break
        if safe:
            print('The process is in a safe state.\n' )
        else:
            print('System in unsafe state. Aborting...\n' )
            break
def __pretty_data( self )-> Union[str, Any]:
'''simple docstring'''
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(item ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(item ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(x ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 251 | 0 |
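Driving the Banker's algorithm class above takes three tables; this usage sketch mirrors the module-level test data and assumes the class and main(**kwargs) entry point defined above (SCREAMING_SNAKE_CASE__ is the name the snippet gives the class):
claim_vector = [8, 5, 9, 7]
allocated = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
maximum = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]
banker = SCREAMING_SNAKE_CASE__(claim_vector, allocated, maximum)
banker.main(describe=True)  # any truthy keyword triggers the pretty-printed tables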
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__: Dict = logging.get_logger(__name__)
UpperCamelCase__: Optional[Any] = ["model.decoder.embed_positions.weights"]
def snake_case_ ( _lowerCAmelCase : str ) -> Tuple:
if "emb" in name:
UpperCAmelCase : int = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
UpperCAmelCase : Optional[Any] = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
UpperCAmelCase : int = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
UpperCAmelCase : List[str] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
UpperCAmelCase : Any = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
UpperCAmelCase : str = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
UpperCAmelCase : str = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
UpperCAmelCase : List[str] = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
UpperCAmelCase : Dict = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
UpperCAmelCase : Optional[int] = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def snake_case_ ( _lowerCAmelCase : OrderedDict , _lowerCAmelCase : int ) -> Tuple[Dict, Dict]:
UpperCAmelCase : int = list(state_dict.keys() )
UpperCAmelCase : Tuple = {}
for key in keys:
UpperCAmelCase : Optional[int] = state_dict.pop(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = rename_keys(_lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase : Union[str, Any] = val[:hidden_size, :]
UpperCAmelCase : str = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase : str = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase : Tuple = val
else:
UpperCAmelCase : int = val
return state_dict, enc_dec_proj_state_dict
def snake_case_ ( _lowerCAmelCase : str ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
UpperCAmelCase : Optional[int] = 1024
UpperCAmelCase : List[Any] = 24
UpperCAmelCase : List[Any] = 16
elif checkpoint == "medium":
UpperCAmelCase : Tuple = 1536
UpperCAmelCase : Union[str, Any] = 48
UpperCAmelCase : Tuple = 24
elif checkpoint == "large":
UpperCAmelCase : Tuple = 2048
UpperCAmelCase : List[Any] = 48
UpperCAmelCase : List[Any] = 32
else:
raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
UpperCAmelCase : List[Any] = MusicgenDecoderConfig(
hidden_size=_lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_lowerCAmelCase , num_attention_heads=_lowerCAmelCase , )
return config
@torch.no_grad()
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Dict="cpu" ) -> str:
UpperCAmelCase : List[str] = MusicGen.get_pretrained(_lowerCAmelCase , device=_lowerCAmelCase )
UpperCAmelCase : Dict = decoder_config_from_checkpoint(_lowerCAmelCase )
UpperCAmelCase : Any = fairseq_model.lm.state_dict()
UpperCAmelCase , UpperCAmelCase : Optional[int] = rename_state_dict(
_lowerCAmelCase , hidden_size=decoder_config.hidden_size )
UpperCAmelCase : Tuple = T5EncoderModel.from_pretrained('''t5-base''' )
UpperCAmelCase : Dict = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
UpperCAmelCase : Optional[int] = MusicgenForCausalLM(_lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCAmelCase , UpperCAmelCase : Optional[int] = decoder.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_lowerCAmelCase ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
UpperCAmelCase : Any = MusicgenForConditionalGeneration(text_encoder=_lowerCAmelCase , audio_encoder=_lowerCAmelCase , decoder=_lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_lowerCAmelCase )
# check we can do a forward pass
UpperCAmelCase : Union[str, Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCAmelCase : List[Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCAmelCase : List[str] = model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''t5-base''' )
UpperCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
UpperCAmelCase : int = MusicgenProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase )
# set the appropriate bos/pad token ids
UpperCAmelCase : Optional[int] = 2048
UpperCAmelCase : List[Any] = 2048
# set other default generation config params
UpperCAmelCase : Tuple = int(30 * audio_encoder.config.frame_rate )
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : str = 3.0
if pytorch_dump_folder is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_lowerCAmelCase )
processor.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
UpperCamelCase__: Optional[int] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 23 |
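The conversion above splits each fused in_proj_weight into separate q/k/v projections by slicing along dim 0. The rule is easy to verify in isolation (illustrative sizes only):
import torch
hidden_size = 8
fused = torch.randn(3 * hidden_size, hidden_size)
q_proj = fused[:hidden_size, :]                    # rows 0 .. h-1
k_proj = fused[hidden_size : 2 * hidden_size, :]   # rows h .. 2h-1
v_proj = fused[-hidden_size:, :]                   # rows 2h .. 3h-1
assert torch.equal(torch.cat([q_proj, k_proj, v_proj]), fused)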
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 296 | 0 |
import argparse
from collections import defaultdict
def overwrite_file (file , class_name , test_name , correct_line , done_test ):
    '''simple docstring'''
    _id = F"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file , """r""" ) as f:
        lines = f.readlines()
    class_regex = F"class {class_name}("
    test_regex = F"{4 * ' '}def {test_name}("
    line_begin_regex = F"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = F"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(F"{spaces * ' '}{correct_line}" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file , """w""" ) as f:
        for line in new_lines:
            f.write(line )
def main (correct_filename , fail=None ):
    '''simple docstring'''
    if fail is not None:
        with open(fail , """r""" ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename , """r""" ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(""";""" )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
_lowerCamelCase : Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 350 |
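The script above consumes one semicolon-separated record per line of --correct_filename, unpacked as file;class_name;test_name;correct_line. A purely hypothetical record (path and names invented for illustration):
tests/models/bert/test_modeling_bert.py;BertModelTest;test_forward;self.assertEqual(output.shape, expected_shape)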
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase : Tuple = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def _UpperCAmelCase (UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : str = state_dict.pop(UpperCamelCase_ )
_lowerCAmelCase : Tuple = val
def _UpperCAmelCase (UpperCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_lowerCAmelCase : List[str] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
_lowerCAmelCase : Optional[Any] = value
else:
_lowerCAmelCase : Union[str, Any] = value
return new_state_dict
def _UpperCAmelCase (UpperCamelCase_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Tuple = """"""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_lowerCAmelCase : Dict = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
_lowerCAmelCase : str = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : str = in_proj_weight[:256, :]
_lowerCAmelCase : Tuple = in_proj_bias[:256]
_lowerCAmelCase : Tuple = in_proj_weight[256:512, :]
_lowerCAmelCase : Dict = in_proj_bias[256:512]
_lowerCAmelCase : Tuple = in_proj_weight[-256:, :]
_lowerCAmelCase : List[str] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_lowerCAmelCase : Optional[int] = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
_lowerCAmelCase : Any = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : Optional[Any] = in_proj_weight[:256, :]
_lowerCAmelCase : Dict = in_proj_bias[:256]
_lowerCAmelCase : Optional[Any] = in_proj_weight[256:512, :]
_lowerCAmelCase : Optional[Any] = in_proj_bias[256:512]
_lowerCAmelCase : str = in_proj_weight[-256:, :]
_lowerCAmelCase : Any = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_lowerCAmelCase : Union[str, Any] = state_dict.pop(
F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
_lowerCAmelCase : str = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_lowerCAmelCase : Tuple = in_proj_weight_cross_attn[:256, :]
_lowerCAmelCase : List[str] = in_proj_bias_cross_attn[:256]
_lowerCAmelCase : Dict = in_proj_weight_cross_attn[256:512, :]
_lowerCAmelCase : Any = in_proj_bias_cross_attn[256:512]
_lowerCAmelCase : Tuple = in_proj_weight_cross_attn[-256:, :]
_lowerCAmelCase : List[Any] = in_proj_bias_cross_attn[-256:]
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = image.size
_lowerCAmelCase : List[str] = max(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase : Union[str, Any] = 800 if """detection""" in checkpoint_url else 1000
_lowerCAmelCase : Optional[int] = target_max_size / current_max_size
_lowerCAmelCase : Union[str, Any] = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def _UpperCAmelCase (UpperCamelCase_ : Any ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = F.to_tensor(UpperCamelCase_ )
_lowerCAmelCase : int = F.normalize(UpperCamelCase_ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int] ):
'''simple docstring'''
logger.info("""Converting model...""" )
# load original state dict
_lowerCAmelCase : Dict = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="""cpu""" )
# rename keys
for src, dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase : List[str] = rename_backbone_keys(UpperCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCAmelCase : int = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_lowerCAmelCase : Tuple = state_dict.pop(UpperCamelCase_ )
_lowerCAmelCase : str = val
# create HuggingFace model and load state dict
_lowerCAmelCase : List[Any] = TableTransformerConfig(
backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
_lowerCAmelCase : Any = 15
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = {0: """table""", 1: """table rotated"""}
_lowerCAmelCase : Tuple = idalabel
_lowerCAmelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
else:
_lowerCAmelCase : Optional[Any] = 125
_lowerCAmelCase : int = 6
_lowerCAmelCase : List[str] = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
_lowerCAmelCase : int = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : Any = DetrImageProcessor(
format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 )
_lowerCAmelCase : int = TableTransformerForObjectDetection(UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
model.eval()
# verify our conversion
_lowerCAmelCase : Optional[Any] = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
_lowerCAmelCase : Union[str, Any] = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=UpperCamelCase_ )
_lowerCAmelCase : Union[str, Any] = Image.open(UpperCamelCase_ ).convert("""RGB""" )
_lowerCAmelCase : Optional[Any] = normalize(resize(UpperCamelCase_ , UpperCamelCase_ ) ).unsqueeze(0 )
_lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ )
if "detection" in checkpoint_url:
_lowerCAmelCase : Dict = (1, 15, 3)
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
_lowerCAmelCase : Union[str, Any] = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
_lowerCAmelCase : Optional[Any] = (1, 125, 7)
_lowerCAmelCase : Tuple = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
_lowerCAmelCase : Dict = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
image_processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
_lowerCAmelCase : List[str] = (
"""microsoft/table-transformer-detection"""
if """detection""" in checkpoint_url
else """microsoft/table-transformer-structure-recognition"""
)
model.push_to_hub(UpperCamelCase_ )
image_processor.push_to_hub(UpperCamelCase_ )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase : List[str] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 159 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class a__ ( A__ , unittest.TestCase ):
A = AlbertTokenizer
A = AlbertTokenizerFast
A = True
A = True
A = True
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ : Optional[int] = AlbertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any],_A : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "this is a test"
SCREAMING_SNAKE_CASE_ : Optional[Any] = "this is a test"
return input_text, output_text
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = "<pad>"
SCREAMING_SNAKE_CASE_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ),_A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ),_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],"<pad>" )
self.assertEqual(vocab_keys[1],"<unk>" )
self.assertEqual(vocab_keys[-1],"▁eloquent" )
self.assertEqual(len(_A ),3_0000 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size,3_0000 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_ : str = tokenizer.tokenize(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A,_A )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(_A,add_special_tokens=_A )
SCREAMING_SNAKE_CASE_ : List[str] = rust_tokenizer.encode(_A,add_special_tokens=_A )
self.assertListEqual(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.encode(_A )
SCREAMING_SNAKE_CASE_ : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A,_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = AlbertTokenizer(_A,keep_accents=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(_A,["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ),[48, 25, 21, 1289] )
SCREAMING_SNAKE_CASE_ : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_A,["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
SCREAMING_SNAKE_CASE_ : int = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(_A,[31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A,["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],)
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = AlbertTokenizer(_A )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.encode("sequence builders" )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode("multi-sequence build" )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.build_inputs_with_special_tokens(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_A,_A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A,model_name="albert-base-v2",revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",)
| 18 |
class RadixNode :
    '''simple docstring'''
    def __init__( self, prefix = "", is_leaf = False ) -> None:
        """simple docstring"""
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix
    def match( self, word ) -> tuple[str, str, str]:
        """simple docstring"""
        x = 0
        for q, w in zip(self.prefix, word ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]
    def insert_many( self, words ) -> None:
        """simple docstring"""
        for word in words:
            self.insert(word )
    def insert( self, word ) -> None:
        """simple docstring"""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True )
        else:
            incoming_node = self.nodes[word[0]]
            matching_string , remaining_prefix , remaining_word = incoming_node.match(
                word )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False )
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word )
def UpperCamelCase__ ( self, __magic_name__ ) -> bool:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0], __magic_name__ )
if not incoming_node:
return False
else:
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Dict = incoming_node.match(
__magic_name__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(__magic_name__ )
def UpperCamelCase__ ( self, __magic_name__ ) -> bool:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.nodes.get(word[0], __magic_name__ )
if not incoming_node:
return False
else:
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = incoming_node.match(
__magic_name__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(__magic_name__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
UpperCamelCase__ : Optional[Any] = list(self.nodes.values() )[0]
UpperCamelCase__ : Union[str, Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCamelCase__ : int = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
UpperCamelCase__ : Any = False
# If there is 1 edge, we merge it with its child
else:
UpperCamelCase__ : Union[str, Any] = list(incoming_node.nodes.values() )[0]
UpperCamelCase__ : List[str] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCamelCase__ : int = merging_node.nodes
return True
def UpperCamelCase__ ( self, __magic_name__ = 0 ) -> None:
"""simple docstring"""
if self.prefix != "":
print('''-''' * height, self.prefix, ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def lowerCAmelCase_ ( ) -> bool:
UpperCamelCase__ : Optional[int] = '''banana bananas bandana band apple all beast'''.split()
UpperCamelCase__ : Optional[int] = RadixNode()
root.insert_many(__UpperCAmelCase )
assert all(root.find(__UpperCAmelCase ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def lowerCAmelCase_ ( ) -> None:
assert test_trie()
def lowerCAmelCase_ ( ) -> None:
UpperCamelCase__ : int = RadixNode()
UpperCamelCase__ : Any = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(__UpperCAmelCase )
print('''Words:''' , __UpperCAmelCase )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
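# --- Added usage sketch (not part of the original snippet) ---
# Minimal demonstration of the RadixNode API reconstructed above: only words
# that were actually inserted are reported as found, not their prefixes.
def radix_example() -> None:
    root = RadixNode()
    root.insert_many("test tester testing".split())
    assert root.find("tester")
    assert not root.find("tes")  # "tes" is only a prefix, never inserted


radix_example()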
| 201 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """Configuration class for the Convolutional vision Transformer (CvT)."""

    model_type = "cvt"

    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
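# --- Added usage sketch (not part of the original module) ---
# Build a randomly initialised CvT backbone from this config. `CvtModel` is
# the companion model class in transformers (assumed available here).
from transformers import CvtModel

cvt_config = CvtConfig()
cvt_model = CvtModel(cvt_config)
print(cvt_config.embed_dim)  # [64, 192, 384]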
| 363 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = '''french fries'''
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowercase__: Optional[Any] = output.images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
lowercase__: List[str] = output.images
lowercase__: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: int = self.get_dummy_components()
lowercase__: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Any = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase )
lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaPipelineSlowTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def _snake_case ( self ):
lowercase__: Any = '''stabilityai/stable-diffusion-2-base'''
lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase )
lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: List[str] = self.get_inputs()
lowercase__: Dict = pipe(**_UpperCAmelCase ).images
lowercase__: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _snake_case ( self ):
lowercase__: int = 0
def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
lowercase__: List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__: Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Any = latents[0, -3:, -3:, -1]
lowercase__: List[Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__: Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Optional[Any] = latents[0, -3:, -3:, -1]
lowercase__: Any = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase__: int = False
lowercase__: str = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: List[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__: Any = self.get_inputs()
lowercase__: List[str] = pipe(**_UpperCAmelCase )
lowercase__: Optional[int] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
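# --- Added usage sketch (not part of the original tests) ---
# Minimal inference example mirroring the slow tests above; the model id and
# prompt come from those tests, the fp16/cuda settings are illustrative.
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)
pipe = pipe.to("cuda")
panorama = pipe("a photo of the dolomites", height=512, width=2048).images[0]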
| 2 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
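# --- Added usage sketch (not part of the original script) ---
# Running the converter from Python; both paths are hypothetical and the .pt
# checkpoint is assumed to come from the microsoft/unilm WavLM release
# referenced in the steps above.
#
#   convert_wavlm_checkpoint("./WavLM-Base.pt", "./wavlm-base-converted")
#
#   from transformers import WavLMModel
#   model = WavLMModel.from_pretrained("./wavlm-base-converted")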
| 52 |
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT a leap year if it isn't divisible by 4, or if it is a
    # century year not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
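# --- Added worked example (not part of the original snippet) ---
# 2024 is a leap year: the 2000s century anchor is Tuesday (2), 2024's
# doomsday works out to Thursday (4), and January's leap-year anchor day is
# the 4th, so January 1st lands on (4 + 1 - 4) % 7 = 1 -> Monday.
assert get_week_day(2024, 1, 1) == "Monday"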
| 32 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
def __init__( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=9_9 , UpperCAmelCase__ : int=3_2 , UpperCAmelCase__ : List[str]=5 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Any=3_7 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Dict=5_1_2 , UpperCAmelCase__ : List[Any]=1_6 , UpperCAmelCase__ : Optional[Any]=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : str=4 , ) -> Union[str, Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_attention_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_choices
def __UpperCAmelCase ( self : Any ) -> List[Any]:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_attention_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
lowerCAmelCase = True
lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
lowerCamelCase : List[str] = True
lowerCamelCase : List[Any] = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
for model_class_name in self.all_model_classes:
lowerCAmelCase = model_class_name.from_pretrained('roberta-base' , from_pt=UpperCAmelCase__ )
lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase__ )
| 55 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
def __init__( self : Tuple ) -> Tuple:
# test for the above condition
self.test()
def __UpperCAmelCase ( self : Any ) -> Tuple:
lowerCAmelCase = 0
lowerCAmelCase = False
while not completed:
if counter == 1:
self.reset()
lowerCAmelCase = self.advance()
if not self.does_advance(UpperCAmelCase__ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.update(UpperCAmelCase__ )
counter += 1
if counter > 1_0_0_0_0:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __UpperCAmelCase ( self : Dict ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : Any ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Optional[Any]=False ) -> Union[str, Any]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : int ) -> int:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : int ) -> List[str]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
if self.does_advance(UpperCAmelCase__ ):
self.fulfilled_idx += 1
lowerCAmelCase = True
if self.fulfilled_idx == (self.seqlen - 1):
lowerCAmelCase = True
lowerCAmelCase = completed
else:
# failed to make progress.
lowerCAmelCase = True
self.reset()
return stepped, completed, reset
def __UpperCAmelCase ( self : int ) -> List[str]:
lowerCAmelCase = False
lowerCAmelCase = 0
def __UpperCAmelCase ( self : Dict ) -> List[str]:
return self.seqlen - (self.fulfilled_idx + 1)
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[str]=False ) -> Optional[int]:
lowerCAmelCase = PhrasalConstraint(self.token_ids )
if stateful:
lowerCAmelCase = self.seqlen
lowerCAmelCase = self.fulfilled_idx
lowerCAmelCase = self.completed
return new_constraint
class DisjunctiveTrie:
def __init__( self : str , UpperCAmelCase__ : List[List[int]] , UpperCAmelCase__ : str=True ) -> str:
lowerCAmelCase = max([len(UpperCAmelCase__ ) for one in nested_token_ids] )
lowerCAmelCase = {}
for token_ids in nested_token_ids:
lowerCAmelCase = root
for tidx, token_id in enumerate(UpperCAmelCase__ ):
if token_id not in level:
lowerCAmelCase = {}
lowerCAmelCase = level[token_id]
if no_subsets and self.has_subsets(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
lowerCAmelCase = root
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase = self.trie
for current_token in current_seq:
lowerCAmelCase = start[current_token]
lowerCAmelCase = list(start.keys() )
return next_tokens
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[Any] ) -> Dict:
lowerCAmelCase = self.next_tokens(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) == 0
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase = list(root.values() )
if len(UpperCAmelCase__ ) == 0:
return 1
else:
return sum([self.count_leaves(UpperCAmelCase__ ) for nn in next_nodes] )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) -> List[Any]:
lowerCAmelCase = self.count_leaves(UpperCAmelCase__ )
return len(UpperCAmelCase__ ) != leaf_count
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
lowerCAmelCase = self.trie.next_tokens(self.current_seq )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : int ) -> Any:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowerCAmelCase = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : int ) -> Tuple:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(UpperCAmelCase__ )}''' )
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
if self.does_advance(UpperCAmelCase__ ):
self.current_seq.append(UpperCAmelCase__ )
lowerCAmelCase = True
else:
lowerCAmelCase = True
self.reset()
lowerCAmelCase = self.trie.reached_leaf(self.current_seq )
lowerCAmelCase = completed
return stepped, completed, reset
def __UpperCAmelCase ( self : Optional[int] ) -> int:
lowerCAmelCase = False
lowerCAmelCase = []
def __UpperCAmelCase ( self : Any ) -> Optional[Any]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[Any]=False ) -> List[Any]:
lowerCAmelCase = DisjunctiveConstraint(self.token_ids )
if stateful:
lowerCAmelCase = self.seqlen
lowerCAmelCase = self.current_seq
lowerCAmelCase = self.completed
return new_constraint
class ConstraintListState:
def __init__( self : Tuple , UpperCAmelCase__ : List[Constraint] ) -> str:
lowerCAmelCase = constraints
# max # of steps required to fulfill a given constraint
lowerCAmelCase = max([c.seqlen for c in constraints] )
lowerCAmelCase = len(UpperCAmelCase__ )
lowerCAmelCase = False
self.init_state()
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
lowerCAmelCase = []
lowerCAmelCase = None
lowerCAmelCase = [constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.constraints]
def __UpperCAmelCase ( self : List[str] ) -> Any:
lowerCAmelCase = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
lowerCAmelCase = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowerCAmelCase = constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
else:
lowerCAmelCase = self.inprogress_constraint.advance()
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.append(UpperCAmelCase__ )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
token_list.extend(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) == 0:
return None
else:
return token_list
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : Optional[List[int]] ) -> Dict:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowerCAmelCase , lowerCAmelCase = self.add(UpperCAmelCase__ )
# the entire list of constraints are fulfilled
if self.completed:
break
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : int ) -> Optional[Any]:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' )
lowerCAmelCase , lowerCAmelCase = False, False
if self.completed:
lowerCAmelCase = True
lowerCAmelCase = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.inprogress_constraint.update(UpperCAmelCase__ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=UpperCAmelCase__ ) )
lowerCAmelCase = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowerCAmelCase = None
if len(self.pending_constraints ) == 0:
# we're done!
lowerCAmelCase = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(UpperCAmelCase__ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = pending_constraint.update(UpperCAmelCase__ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(UpperCAmelCase__ )
lowerCAmelCase = None
if not complete and stepped:
lowerCAmelCase = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowerCAmelCase = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowerCAmelCase = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any]=True ) -> Optional[int]:
lowerCAmelCase = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
lowerCAmelCase = [
constraint.copy(stateful=UpperCAmelCase__ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowerCAmelCase = self.inprogress_constraint.copy(stateful=UpperCAmelCase__ )
lowerCAmelCase = [constraint.copy() for constraint in self.pending_constraints]
return new_state
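# --- Added usage sketch (not part of the original module) ---
# How these constraints are consumed in practice: `model.generate` accepts a
# `constraints` list and applies it during beam search (num_beams > 1). The
# model choice and prompt below are illustrative only.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

input_ids = tokenizer("translate English to German: How old are you?", return_tensors="pt").input_ids
constraint = PhrasalConstraint(tokenizer("Sie", add_special_tokens=False).input_ids)
output = model.generate(input_ids, constraints=[constraint], num_beams=5, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))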
| 55 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs,
        )
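# --- Added example (not part of the original module) ---
# The total layer count is derived in __init__ above from the two layer
# arguments.
_cfg = GPTSanJapaneseConfig(num_switch_layers=10, num_ext_layers=2)
assert _cfg.num_layers == 12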
| 24 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between the rows of a and of b."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map every pixel of x to the index of its nearest cluster color."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
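# --- Added sanity check (not part of the original module) ---
# With a hypothetical 2-color palette, the dark pixel maps to cluster 0 and
# the bright pixel to cluster 1.
_palette = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float32)
_pixels = np.array([[10, 10, 10], [240, 250, 245]], dtype=np.float32)
assert color_quantize(_pixels, _palette).tolist() == [0, 1]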
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self ,__UpperCAmelCase = None ,__UpperCAmelCase = True ,__UpperCAmelCase = None ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = True ,__UpperCAmelCase = True ,**__UpperCAmelCase ,) -> None:
super().__init__(**__UpperCAmelCase )
A__ = size if size is not None else {'height': 2_56, 'width': 2_56}
A__ = get_size_dict(__UpperCAmelCase )
A__ = np.array(__UpperCAmelCase ) if clusters is not None else None
A__ = do_resize
A__ = size
A__ = resample
A__ = do_normalize
A__ = do_color_quantize
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = PILImageResampling.BILINEAR ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> np.ndarray:
A__ = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
__UpperCAmelCase ,size=(size['height'], size['width']) ,resample=__UpperCAmelCase ,data_format=__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,) -> np.ndarray:
A__ = rescale(image=__UpperCAmelCase ,scale=1 / 1_2_7.5 ,data_format=__UpperCAmelCase )
A__ = image - 1
return image
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = ChannelDimension.FIRST ,**__UpperCAmelCase ,) -> PIL.Image.Image:
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(__UpperCAmelCase )
A__ = resample if resample is not None else self.resample
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
A__ = clusters if clusters is not None else self.clusters
A__ = np.array(__UpperCAmelCase )
A__ = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
A__ = [self.resize(image=__UpperCAmelCase ,size=__UpperCAmelCase ,resample=__UpperCAmelCase ) for image in images]
if do_normalize:
A__ = [self.normalize(image=__UpperCAmelCase ) for image in images]
if do_color_quantize:
A__ = [to_channel_dimension_format(__UpperCAmelCase ,ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
A__ = np.array(__UpperCAmelCase )
A__ = color_quantize(__UpperCAmelCase ,__UpperCAmelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
A__ = images.shape[0]
A__ = images.reshape(__UpperCAmelCase ,-1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
A__ = list(__UpperCAmelCase )
else:
A__ = [to_channel_dimension_format(__UpperCAmelCase ,__UpperCAmelCase ) for image in images]
A__ = {'input_ids': images}
return BatchFeature(data=__UpperCAmelCase ,tensor_type=__UpperCAmelCase )
| 221 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
"""simple docstring"""
a_ : Dict = []
a_ : List[str] = 1
while len(__A ) < 1e6:
constant.append(str(__A ) )
i += 1
a_ : List[Any] = ''.join(__A )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
if __name__ == "__main__":
print(solution())
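# --- Added note (not part of the original snippet) ---
# For reference, the published answer to Project Euler problem 40 is 210,
# i.e. solution() == 210 (stated from memory; verify before relying on it).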
| 361 |
from string import ascii_uppercase

char_to_num = {char: i for i, char in enumerate(ascii_uppercase)}
num_to_char = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message: each letter becomes (message - key) mod 26."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (char_to_num[letter] - char_to_num[key_new[i]]) % 26
            i += 1
            encrypted += num_to_char[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    """Decrypt the cipher text: each letter becomes (cipher + key) mod 26."""
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (char_to_num[letter] + char_to_num[key_new[i]] + 26) % 26
            i += 1
            or_txt += num_to_char[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
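# --- Added worked example (not part of the original snippet) ---
# One-letter walk-through: 'T' (19) with key letter 'S' (18) encrypts to
# (19 - 18) % 26 = 1 -> 'B'; decryption gives (1 + 18 + 26) % 26 = 19 -> 'T'.
assert cipher_text("T", generate_key("T", "S")) == "B"
assert original_text("B", generate_key("B", "S")) == "T"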
| 120 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
def __init__(self : Dict , __a : Optional[Any] , __a : List[Any]=3 , __a : List[str]=7 , __a : int=True , __a : int=True , __a : Dict=False , __a : List[str]=True , __a : int=99 , __a : List[str]=32 , __a : Tuple=5 , __a : str=4 , __a : Optional[Any]=37 , __a : str="gelu" , __a : Tuple=0.1 , __a : Optional[int]=0.1 , __a : str=512 , __a : int=16 , __a : str=2 , __a : Tuple=0.02 , __a : str=3 , __a : Tuple=4 , __a : List[str]=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase (self : Union[str, Any] ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__a , )
def _lowercase (self : Optional[int] , __a : Union[str, Any] , __a : int , __a : Any , __a : int , __a : Union[str, Any] , __a : str , __a : int ):
UpperCAmelCase_ = FalconModel(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a )
UpperCAmelCase_ = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase (self : Union[str, Any] , __a : str , __a : Optional[int] , __a : List[Any] , __a : Optional[Any] , __a : str , __a : Any , __a : List[Any] , __a : int , __a : List[Any] , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = FalconModel(__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , )
UpperCAmelCase_ = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase (self : Optional[Any] , __a : Dict , __a : Any , __a : str , __a : Dict , __a : List[Any] , __a : str , __a : Tuple , __a : int , __a : List[Any] , ):
UpperCAmelCase_ = FalconForCausalLM(config=__a )
model.to(__a )
model.eval()
UpperCAmelCase_ = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase (self : Dict , __a : int , __a : Any , __a : Tuple , __a : Union[str, Any] , __a : Optional[Any] , __a : int , __a : Tuple , __a : Optional[int] , __a : Any , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = FalconForCausalLM(config=__a )
model.to(__a )
model.eval()
# first forward pass
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , )
UpperCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["hidden_states"][0]
UpperCAmelCase_ = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["hidden_states"][0]
# select random slice
UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __A ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
a__ : str = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ : int = (FalconForCausalLM,) if is_torch_available() else ()
a__ : Optional[int] = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : Any = False
a__ : Tuple = False
    def setUp(self ):
        self.model_tester = FalconModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FalconConfig , hidden_size=37 )
def _lowercase (self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase (self : List[str] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _lowercase (self : Optional[int] ):
        config , *config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *config_and_inputs )
def _lowercase (self : Optional[int] ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase (self : Optional[Any] ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase (self : Any ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , use_cache=True )
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values )
        standard_cache = model._convert_cache_to_standard_format(rw_cache , batch_size )
        for layer in range(len(rw_cache ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _lowercase (self : Optional[int] ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = FalconForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase (self : Tuple ):
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
            config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config , "use_cache" ):
                return
            model = model_class(config ).to(torch_device )
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
            num_hidden_layers = (
                getattr(config , "decoder_layers" , None )
                or getattr(config , "num_decoder_layers" , None )
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config , "num_kv_heads" , config.num_attention_heads )
            embed_dim = getattr(config , "d_model" , config.hidden_size )
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv ) , num_hidden_layers )
            batch_size , seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers ):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __A ( unittest.TestCase ):
@slow
def _lowercase (self : Union[str, Any] ):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
        model.eval()
        model.to(torch_device )
        inputs = tokenizer("My favorite food is" , return_tensors="pt" ).to(torch_device )
        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=19 )
        output_str = tokenizer.batch_decode(output_ids )[0]
        self.assertEqual(output_str , EXPECTED_OUTPUT )
@slow
def _lowercase (self : Dict ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo )
            model = FalconForCausalLM.from_pretrained(repo )
            model.eval()
            model.to(torch_device )
            inputs = tokenizer("My favorite food is" , return_tensors="pt" ).to(torch_device )
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs , do_sample=False , max_new_tokens=4 )
            model.generate(**inputs , do_sample=True , max_new_tokens=4 )
            model.generate(**inputs , num_beams=2 , max_new_tokens=4 )
@slow
def _lowercase (self : Union[str, Any] ):
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
                tokenizer = AutoTokenizer.from_pretrained(repo )
                model = FalconForCausalLM.from_pretrained(repo )
                model.eval()
                model.to(device=torch_device )
                inputs = tokenizer("My favorite food is" , return_tensors="pt" ).to(torch_device )
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=False )
                outputs_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=True )
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
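# Aside (an illustration, not part of the test file above): the cache-shape
# test earlier in this class distinguishes three Falcon attention layouts.
# A minimal sketch of the expected KV-head count per layout; the helper name
# is an assumption made for illustration.
def expected_falcon_kv_heads(config):
    if getattr(config, "new_decoder_architecture", False):
        return config.num_attention_heads  # new decoder architecture: full head count
    if getattr(config, "multi_query", False):
        return 1  # multi-query attention shares a single KV head
    return config.num_attention_heads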
| 1 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size , input_in_memory_max_size , monkeypatch ):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
assert result == expected
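# Minimal sketch (an assumption, not the library source) of the behaviour the
# parametrized test above pins down: is_small_dataset is True only when a known
# dataset size fits under the configured in-memory cap.
def is_small_dataset_sketch(dataset_size, in_memory_max_size):
    if dataset_size and in_memory_max_size:
        return dataset_size < in_memory_max_size
    return False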
| 358 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A__ :
def __init__( self , A_ , A_=13 , A_=30 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=3 , A_=None , A_=2 , ):
'''simple docstring'''
UpperCamelCase : List[str] = parent
UpperCamelCase : Tuple = batch_size
UpperCamelCase : Union[str, Any] = image_size
UpperCamelCase : Optional[int] = patch_size
UpperCamelCase : List[str] = num_channels
UpperCamelCase : Any = is_training
UpperCamelCase : Dict = use_labels
UpperCamelCase : List[str] = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : str = intermediate_size
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : List[Any] = hidden_dropout_prob
UpperCamelCase : Dict = attention_probs_dropout_prob
UpperCamelCase : List[Any] = type_sequence_label_size
UpperCamelCase : List[str] = initializer_range
UpperCamelCase : Union[str, Any] = scope
UpperCamelCase : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCamelCase : Optional[Any] = (image_size // patch_size) ** 2
UpperCamelCase : int = num_patches + 2
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase : Tuple = None
if self.use_labels:
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase( self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = TFDeiTModel(config=A_ )
UpperCamelCase : Tuple = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : List[str] = TFDeiTForMaskedImageModeling(config=A_ )
UpperCamelCase : Optional[Any] = model(A_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase : Dict = 1
UpperCamelCase : Optional[Any] = TFDeiTForMaskedImageModeling(A_ )
UpperCamelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : Any = model(A_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __UpperCamelCase( self , A_ , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.type_sequence_label_size
UpperCamelCase : List[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase : List[Any] = 1
UpperCamelCase : Optional[Any] = TFDeiTForImageClassification(A_ )
UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase : List[Any] = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase : int = config_and_inputs
UpperCamelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCAmelCase :str = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_UpperCAmelCase :Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_UpperCAmelCase :Dict = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Optional[int] = False
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = TFDeiTModelTester(self )
UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCamelCase( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __UpperCamelCase( self ):
'''simple docstring'''
pass
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
UpperCamelCase : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Dense ) )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : str = model_class(A_ )
UpperCamelCase : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def __UpperCamelCase( self , A_ , A_ , A_=False ):
'''simple docstring'''
UpperCamelCase : List[str] = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = TFDeiTModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def A_ ( ) -> str:
UpperCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase( self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
UpperCamelCase : List[Any] = self.default_image_processor
UpperCamelCase : Union[str, Any] = prepare_img()
UpperCamelCase : Union[str, Any] = image_processor(images=A_ , return_tensors="tf" )
# forward pass
UpperCamelCase : str = model(**A_ )
# verify the logits
UpperCamelCase : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
UpperCamelCase : Tuple = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
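# Worked check of the sequence-length rule noted in the DeiT tester above:
# (image_size // patch_size) ** 2 patches, plus the [CLS] and distillation tokens.
image_size, patch_size = 30, 2
assert (image_size // patch_size) ** 2 + 2 == 227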
| 140 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
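# Simplified sketch of the lazy-import pattern used by the __init__ above (an
# illustration, not the actual transformers implementation): the module object
# in sys.modules is swapped for a proxy that imports submodules on first access.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)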
| 48 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    model_type = "unispeech"
def __init__(self : Any , UpperCAmelCase_ : Any=32 , UpperCAmelCase_ : List[str]=768 , UpperCAmelCase_ : Any=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-5 , UpperCAmelCase_ : str="group" , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Tuple=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase_ : str=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : Any=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=128 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=0.05 , UpperCAmelCase_ : Optional[int]=10 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : Optional[Any]=320 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=100 , UpperCAmelCase_ : Any=256 , UpperCAmelCase_ : int=256 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="mean" , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=256 , UpperCAmelCase_ : Optional[int]=80 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Dict=0.5 , **UpperCAmelCase_ : Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =hidden_size
lowerCamelCase__: List[str] =feat_extract_norm
lowerCamelCase__: Dict =feat_extract_activation
lowerCamelCase__: Optional[Any] =list(UpperCAmelCase_)
lowerCamelCase__: Any =list(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =list(UpperCAmelCase_)
lowerCamelCase__: Dict =conv_bias
lowerCamelCase__: Optional[Any] =num_conv_pos_embeddings
lowerCamelCase__: Dict =num_conv_pos_embedding_groups
lowerCamelCase__: int =len(self.conv_dim)
lowerCamelCase__: Union[str, Any] =num_hidden_layers
lowerCamelCase__: Union[str, Any] =intermediate_size
lowerCamelCase__: Dict =hidden_act
lowerCamelCase__: List[Any] =num_attention_heads
lowerCamelCase__: Dict =hidden_dropout
lowerCamelCase__: Optional[Any] =attention_dropout
lowerCamelCase__: Optional[Any] =activation_dropout
lowerCamelCase__: Tuple =feat_proj_dropout
lowerCamelCase__: int =final_dropout
lowerCamelCase__: Optional[Any] =layerdrop
lowerCamelCase__: Dict =layer_norm_eps
lowerCamelCase__: Optional[Any] =initializer_range
lowerCamelCase__: int =num_ctc_classes
lowerCamelCase__: Tuple =vocab_size
lowerCamelCase__: Dict =do_stable_layer_norm
lowerCamelCase__: List[Any] =use_weighted_layer_sum
lowerCamelCase__: Dict =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase__: int =apply_spec_augment
lowerCamelCase__: List[str] =mask_time_prob
lowerCamelCase__: Union[str, Any] =mask_time_length
lowerCamelCase__: List[Any] =mask_time_min_masks
lowerCamelCase__: Any =mask_feature_prob
lowerCamelCase__: Optional[Any] =mask_feature_length
lowerCamelCase__: List[str] =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCamelCase__: Optional[Any] =num_codevectors_per_group
lowerCamelCase__: str =num_codevector_groups
lowerCamelCase__: Tuple =contrastive_logits_temperature
lowerCamelCase__: int =feat_quantizer_dropout
lowerCamelCase__: Any =num_negatives
lowerCamelCase__: List[str] =codevector_dim
lowerCamelCase__: Union[str, Any] =proj_codevector_dim
lowerCamelCase__: Any =diversity_loss_weight
# ctc loss
lowerCamelCase__: Any =ctc_loss_reduction
lowerCamelCase__: Dict =ctc_zero_infinity
# pretraining loss
lowerCamelCase__: Dict =replace_prob
@property
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
| 10 | 0 |
from __future__ import annotations
def p_series(nth_term , power ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series = []
    for temp in range(int(nth_term ) ):
        series.append(f'''1 / {pow(temp + 1 , int(power ) )}''' if series else '''1''' )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('''Enter the last number (nth term) of the P-Series'''))
    power = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power)) | 34 |
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCamelCase_ = logging.getLogger(__name__)
class __lowerCamelCase ( __snake_case ):
    model_type = 'masked_bert'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale | 34 | 1 |
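# Hypothetical usage sketch for the config above (upstream class name
# MaskedBertConfig assumed): pruning_method, mask_init and mask_scale configure
# the prunable linear layers; the values shown are the illustrative defaults.
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
assert config.pruning_method == "topK"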
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726" ):
    new_olid = olid.strip().strip('/' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/' ) != 1:
        msg = F"""{olid} is not a valid Open Library olid"""
        raise ValueError(msg )
    return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book(ol_book_data: dict ):
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ', '.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
    isbn = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
    book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
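# Example round trip for the helpers above, using the module's default olid
# ("isbn/0140328726"); requires network access to openlibrary.org.
book = summarize_book(get_openlibrary_data())
print(book['Title'])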
| 27 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int] , key: tuple[int, ...] ) -> str | None:
    """simple docstring"""
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars(ciphertext: list[int] ) -> list[str]:
    """simple docstring"""
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        encoded = try_key(ciphertext , key )
        if encoded is not None:
            possibles.append(encoded )
    return possibles
def filter_common_word(possibles: list[str] , common_word: str ) -> list[str]:
    """simple docstring"""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt" ) -> int:
    """simple docstring"""
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str
    data = Path(__file__ ).parent.joinpath(filename ).read_text(encoding='utf-8' )
    ciphertext = [int(number ) for number in data.strip().split(',' )]
    possibles = filter_valid_chars(ciphertext )
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles , common_word )
        if len(possibles ) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
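# The brute force above works because XOR is self-inverse: applying the same
# key byte twice returns the original byte.
key_byte, cipher_byte = ord('g'), ord('E')
assert (cipher_byte ^ key_byte) ^ key_byte == cipher_byte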
| 251 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : str = logging.get_logger(__name__)
snake_case__ : Optional[int] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class snake_case_( a__ ):
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_attention_heads=1_2 , num_qa_labels=9_5_0_0 , num_object_labels=1_6_0_0 , num_attr_labels=4_0_0 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2_0_4_8 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
        super().__init__(**kwargs )
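# Illustrative instantiation of the configuration above (upstream class name
# LxmertConfig assumed): LXMERT tracks separate layer counts per stream, so
# num_hidden_layers ends up as a dict rather than a single int.
config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
assert config.num_hidden_layers == {'vision': 5, 'cross_encoder': 5, 'language': 9}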
| 314 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name ):
    config = SwinConfig(image_size=192 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name ):
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def convert_state_dict(orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''] = val[
                    :dim
                ]
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'''swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs ).logits
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and image processor for {model_name} to hub''' )
        model.push_to_hub(f'''microsoft/{model_name}''' )
        image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
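# Hypothetical programmatic invocation mirroring the argparse defaults above
# (the checkpoint filename is shortened for illustration):
# convert_swin_checkpoint(
#     model_name="swin-base-simmim-window6-192",
#     checkpoint_path="simmim_pretrain__swin_base__img192_window6__100ep.pth",
#     pytorch_dump_folder_path="./swin-simmim-converted",
#     push_to_hub=False,
# )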
| 314 | 1 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class a ( _lowerCamelCase, _lowerCamelCase ):
"""simple docstring"""
@register_to_config
def __init__( self: Union[str, Any] , UpperCamelCase: int = 1_28 , UpperCamelCase: int = 2_56 , UpperCamelCase: float = 20_00.0 , UpperCamelCase: int = 7_68 , UpperCamelCase: int = 12 , UpperCamelCase: int = 12 , UpperCamelCase: int = 64 , UpperCamelCase: int = 20_48 , UpperCamelCase: float = 0.1 , ):
"""simple docstring"""
super().__init__()
A__ = nn.Sequential(
nn.Linear(_lowerCAmelCase , d_model * 4 , bias=_lowerCAmelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowerCAmelCase ) , nn.SiLU() , )
A__ = nn.Embedding(_lowerCAmelCase , _lowerCAmelCase )
A__ = False
A__ = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
A__ = nn.Dropout(p=_lowerCAmelCase )
A__ = nn.ModuleList()
for lyr_num in range(_lowerCAmelCase ):
# FiLM conditional T5 decoder
A__ = DecoderLayer(d_model=_lowerCAmelCase , d_kv=_lowerCAmelCase , num_heads=_lowerCAmelCase , d_ff=_lowerCAmelCase , dropout_rate=_lowerCAmelCase )
self.decoders.append(_lowerCAmelCase )
A__ = TaLayerNorm(_lowerCAmelCase )
A__ = nn.Dropout(p=_lowerCAmelCase )
A__ = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
def UpperCamelCase ( self: Dict , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] ):
"""simple docstring"""
A__ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Any , UpperCamelCase: Dict ):
"""simple docstring"""
A__ , A__ , A__ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A__ = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A__ = self.conditioning_emb(_lowerCAmelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A__ = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A__ = torch.broadcast_to(
torch.arange(_lowerCAmelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A__ = self.position_encoding(_lowerCAmelCase )
A__ = self.continuous_inputs_projection(_lowerCAmelCase )
inputs += position_encodings
A__ = self.dropout(_lowerCAmelCase )
# decoder: No padding present.
A__ = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A__ = [(x, self.encoder_decoder_mask(_lowerCAmelCase , _lowerCAmelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A__ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A__ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A__ = lyr(
_lowerCAmelCase , conditioning_emb=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , )[0]
A__ = self.decoder_norm(_lowerCAmelCase )
A__ = self.post_dropout(_lowerCAmelCase )
A__ = self.spec_out(_lowerCAmelCase )
return spec_out
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: List[str] , UpperCamelCase: int , UpperCamelCase: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: Dict , UpperCamelCase: Dict , UpperCamelCase: int=1e-6 ):
"""simple docstring"""
super().__init__()
A__ = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowerCAmelCase , d_kv=_lowerCAmelCase , num_heads=_lowerCAmelCase , dropout_rate=_lowerCAmelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowerCAmelCase , d_kv=_lowerCAmelCase , num_heads=_lowerCAmelCase , dropout_rate=_lowerCAmelCase , layer_norm_epsilon=_lowerCAmelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowerCAmelCase , d_ff=_lowerCAmelCase , dropout_rate=_lowerCAmelCase , layer_norm_epsilon=_lowerCAmelCase ) )
def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , UpperCamelCase: List[str]=None , UpperCamelCase: Optional[int]=None , UpperCamelCase: Dict=None , ):
"""simple docstring"""
A__ = self.layer[0](
_lowerCAmelCase , conditioning_emb=_lowerCAmelCase , attention_mask=_lowerCAmelCase , )
if encoder_hidden_states is not None:
A__ = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
A__ = self.layer[1](
_lowerCAmelCase , key_value_states=_lowerCAmelCase , attention_mask=_lowerCAmelCase , )
# Apply Film Conditional Feed Forward layer
A__ = self.layer[-1](_lowerCAmelCase , _lowerCAmelCase )
return (hidden_states,)
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: List[str] , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple ):
"""simple docstring"""
super().__init__()
A__ = TaLayerNorm(_lowerCAmelCase )
A__ = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowerCAmelCase )
A__ = Attention(query_dim=_lowerCAmelCase , heads=_lowerCAmelCase , dim_head=_lowerCAmelCase , out_bias=_lowerCAmelCase , scale_qk=_lowerCAmelCase )
A__ = nn.Dropout(_lowerCAmelCase )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Any , UpperCamelCase: List[str]=None , UpperCamelCase: Any=None , ):
"""simple docstring"""
A__ = self.layer_norm(_lowerCAmelCase )
if conditioning_emb is not None:
A__ = self.FiLMLayer(_lowerCAmelCase , _lowerCAmelCase )
# Self-attention block
A__ = self.attention(_lowerCAmelCase )
A__ = hidden_states + self.dropout(_lowerCAmelCase )
return hidden_states
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: List[Any] , UpperCamelCase: int , UpperCamelCase: List[Any] , UpperCamelCase: Dict ):
"""simple docstring"""
super().__init__()
A__ = Attention(query_dim=_lowerCAmelCase , heads=_lowerCAmelCase , dim_head=_lowerCAmelCase , out_bias=_lowerCAmelCase , scale_qk=_lowerCAmelCase )
A__ = TaLayerNorm(_lowerCAmelCase , eps=_lowerCAmelCase )
A__ = nn.Dropout(_lowerCAmelCase )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: int , UpperCamelCase: Optional[int]=None , UpperCamelCase: List[Any]=None , ):
"""simple docstring"""
A__ = self.layer_norm(_lowerCAmelCase )
A__ = self.attention(
_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , attention_mask=attention_mask.squeeze(1 ) , )
A__ = hidden_states + self.dropout(_lowerCAmelCase )
return layer_output
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: str , UpperCamelCase: Optional[Any] , UpperCamelCase: int , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
super().__init__()
A__ = TaDenseGatedActDense(d_model=_lowerCAmelCase , d_ff=_lowerCAmelCase , dropout_rate=_lowerCAmelCase )
A__ = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowerCAmelCase )
A__ = TaLayerNorm(_lowerCAmelCase , eps=_lowerCAmelCase )
A__ = nn.Dropout(_lowerCAmelCase )
def UpperCamelCase ( self: Dict , UpperCamelCase: str , UpperCamelCase: Optional[Any]=None ):
"""simple docstring"""
A__ = self.layer_norm(_lowerCAmelCase )
if conditioning_emb is not None:
A__ = self.film(_lowerCAmelCase , _lowerCAmelCase )
A__ = self.DenseReluDense(_lowerCAmelCase )
A__ = hidden_states + self.dropout(_lowerCAmelCase )
return hidden_states
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: int , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Any ):
"""simple docstring"""
super().__init__()
A__ = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
A__ = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
A__ = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase )
A__ = nn.Dropout(_lowerCAmelCase )
A__ = NewGELUActivation()
def UpperCamelCase ( self: Any , UpperCamelCase: int ):
"""simple docstring"""
A__ = self.act(self.wi_a(_lowerCAmelCase ) )
A__ = self.wi_a(_lowerCAmelCase )
A__ = hidden_gelu * hidden_linear
A__ = self.dropout(_lowerCAmelCase )
A__ = self.wo(_lowerCAmelCase )
return hidden_states
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: str , UpperCamelCase: List[Any] , UpperCamelCase: List[Any]=1e-6 ):
"""simple docstring"""
super().__init__()
A__ = nn.Parameter(torch.ones(_lowerCAmelCase ) )
A__ = eps
def UpperCamelCase ( self: Dict , UpperCamelCase: Any ):
"""simple docstring"""
A__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowerCAmelCase )
A__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A__ = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class a ( nn.Module ):
"""simple docstring"""
def UpperCamelCase ( self: List[str] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(_lowerCAmelCase , 3.0 )) ))
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: str ):
"""simple docstring"""
super().__init__()
A__ = nn.Linear(_lowerCAmelCase , out_features * 2 , bias=_lowerCAmelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: str , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = self.scale_bias(_lowerCAmelCase )
A__ , A__ = torch.chunk(_lowerCAmelCase , 2 , -1 )
A__ = x * (1 + scale) + shift
return x
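# Minimal numeric check of the FiLM rule implemented by TaFiLMLayer above: the
# conditioning embedding is projected to per-channel (scale, shift) and applied
# as x * (1 + scale) + shift, so zero scale and shift leave x unchanged.
import torch
x = torch.randn(2, 8, 16)  # (batch, seq, channels)
scale = torch.zeros(2, 1, 16)
shift = torch.zeros(2, 1, 16)
assert torch.equal(x * (1 + scale) + shift, x)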
| 335 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info(auth_token: str ) -> dict[Any, Any]:
    '''simple docstring'''
    headers = {
        "Authorization": F'''token {auth_token}''',
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 159 | 0 |
from collections import namedtuple
from_to = namedtuple('from_to', 'from_ to')
METRIC_CONVERSION = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00454, 264.172),
'cubicyard': from_to(0.76455, 1.30795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float , from_type: str , to_type: str ) -> float:
    """simple docstring"""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
            + ", ".join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ", ".join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
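# Worked example for volume_conversion above: value * from_.from_ * to.to,
# so 4 cubic meters -> 4 * 1 * 1000 = 4000 litres.
assert volume_conversion(4, 'cubicmeter', 'litre') == 4000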
| 367 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab ,range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self ,**kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens ,bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) ,input_bpe_tokens )
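# Illustration of the "@@" continuation marker asserted above: a piece ending
# in "@@" glues onto the next piece, so the tokens detokenize back to the text.
pieces = ["adapt", "re@@", "a@@", "c@@", "t", "re@@", "adapt", "apt"]
detok = "".join(p[:-2] if p.endswith("@@") else p + " " for p in pieces).strip()
assert detok == "adapt react readapt apt"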
| 124 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
lowerCamelCase_ : Optional[List[int]] = None
class __lowerCAmelCase ( lowercase_ ):
lowerCamelCase_ : Dict = """train"""
lowerCamelCase_ : Any = """dev"""
lowerCamelCase_ : List[str] = """test"""
class __lowerCAmelCase :
@staticmethod
def read_examples_from_file(data_dir , mode ):
"""Reads the examples of one split; implemented per task."""
raise NotImplementedError
@staticmethod
def get_labels(path ):
"""Returns the list of task labels; implemented per task."""
raise NotImplementedError
@staticmethod
def convert_examples_to_features(examples , label_list , max_seq_length , tokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-100 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ) -> List[InputFeatures]:
"""Loads a list of InputExamples into a list of InputFeatures."""
label_map = {label: i for i, label in enumerate(label_list )}
features = []
for ex_index, example in enumerate(examples ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' , ex_index , len(examples ) )
tokens = []
label_ids = []
for word, label in zip(example.words , example.labels ):
word_tokens = tokenizer.tokenize(word )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(word_tokens ) > 0:
tokens.extend(word_tokens )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = tokenizer.num_special_tokens_to_add()
if len(tokens ) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
#  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
#  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
# (b) For single sequences:
#  tokens:   [CLS] the dog is hairy . [SEP]
#  type_ids:   0   0   0   0  0     0   0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids )
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids ) == max_seq_length
assert len(input_mask ) == max_seq_length
assert len(segment_ids ) == max_seq_length
assert len(label_ids ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(x ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(x ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(x ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(x ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(x ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
segment_ids = None
features.append(
InputFeatures(
input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset( Dataset ):
features: List[InputFeatures]
pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
def __init__(self , token_classification_task , data_dir , tokenizer , labels , model_type , max_seq_length = None , overwrite_cache=False , mode = Split.train , ) -> None:
# Load data features from cache or from the dataset file.
cached_features_file = os.path.join(
data_dir , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + '''.lock'''
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not overwrite_cache:
logger.info(F'''Loading features from cached file {cached_features_file}''' )
self.features = torch.load(cached_features_file )
else:
logger.info(F'''Creating features from dataset file at {data_dir}''' )
examples = token_classification_task.read_examples_from_file(data_dir , mode )
# TODO clean up all this to leverage built-in features of tokenizers
self.features = token_classification_task.convert_examples_to_features(
examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , cached_features_file )
def __len__(self ) -> Tuple:
'''simple docstring'''
return len(self.features )
def __getitem__(self , i ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
features: List[InputFeatures]
pad_token_label_id: int = -100
def __init__(self , token_classification_task , data_dir , tokenizer , labels , model_type , max_seq_length = None , overwrite_cache=False , mode = Split.train , ) -> None:
examples = token_classification_task.read_examples_from_file(data_dir , mode )
# TODO clean up all this to leverage built-in features of tokenizers
self.features = token_classification_task.convert_examples_to_features(
examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
self.dataset = tf.data.Dataset.from_generator(
gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
self.dataset = tf.data.Dataset.from_generator(
gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def get_dataset(self ):
self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__(self ) -> Any:
'''simple docstring'''
return len(self.features )
def __getitem__(self , i ) -> InputFeatures:
return self.features[i]
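# Self-contained sketch (toy subword splitter, hypothetical names) of the
# label-alignment rule implemented above: the first sub-token of a word
# keeps the word's label id, the rest get pad_token_label_id (-100) so the
# cross-entropy loss ignores them.
def align_labels(words, labels, label_map, subtokenize, pad_token_label_id=-100):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        word_tokens = subtokenize(word)
        if word_tokens:
            tokens.extend(word_tokens)
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
    return tokens, label_ids

toy_subtok = lambda w: [w[:3], "##" + w[3:]] if len(w) > 3 else [w]
print(align_labels(["Hugging", "Face"], ["B-ORG", "I-ORG"], {"B-ORG": 0, "I-ORG": 1}, toy_subtok))
# (['Hug', '##ging', 'Fac', '##e'], [0, -100, 1, -100])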
| 279 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
"""Wraps a text config and adds the attributes used by the multimodal model."""
def __init__(self , config , num_labels=None , modal_hidden_size=2048 ):
self.__dict__ = config.__dict__
self.modal_hidden_size = modal_hidden_size
if num_labels:
self.num_labels = num_labels
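# A minimal usage sketch (illustrative values): any config-like object with
# a __dict__ can be wrapped; the wrapper copies its attributes and layers
# the multimodal fields on top.
# from types import SimpleNamespace
# base = SimpleNamespace(hidden_size=768, vocab_size=30522)
# wrapped = MMBTConfig(base, num_labels=2, modal_hidden_size=2048)
# wrapped.hidden_size -> 768; wrapped.num_labels -> 2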
| 2 | 0 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline( DiffusionPipeline ):
"""Pipeline for MIDI-to-audio spectrogram diffusion."""
_optional_components = ["melgan"]
def __init__( self , notes_encoder : SpectrogramNotesEncoder , continuous_encoder : SpectrogramContEncoder , decoder : TaFilmDecoder , scheduler : DDPMScheduler , melgan : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
self.min_value = math.log(1E-5 ) # Matches MelGAN training.
self.max_value = 4.0 # Largest value for most examples
self.n_dims = 128
self.register_modules(
notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
"""Linearly rescale features from the MelGAN range to output_range."""
min_out , max_out = output_range
if clip:
features = torch.clip(features , self.min_value , self.max_value )
# Scale to [0, 1].
zero_one = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
"""Invert scale_features: map values in input_range back to the MelGAN range."""
min_out , max_out = input_range
outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
# Scale to [0, 1].
zero_one = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def encode( self , input_tokens , continuous_inputs , continuous_mask ):
tokens_mask = input_tokens > 0
tokens_encoded , tokens_mask = self.notes_encoder(
encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
continuous_encoded , continuous_mask = self.continuous_encoder(
encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def decode( self , encodings_and_masks , input_tokens , noise_time ):
timesteps = noise_time
if not torch.is_tensor(timesteps ):
timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
timesteps = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
logits = self.decoder(
encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
return logits
@torch.no_grad()
def __call__( self , input_tokens : List[List[int]] , generator : Optional[torch.Generator] = None , num_inference_steps : int = 100 , return_dict : bool = True , output_type : str = "numpy" , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(callback_steps )}.''' )
pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
for i, encoder_input_tokens in enumerate(input_tokens ):
if i == 0:
encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
encoder_continuous_mask = ones
encoder_continuous_inputs = self.scale_features(
encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )
encodings_and_masks = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
x = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(num_inference_steps )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
output = self.decode(
encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
x = self.scheduler.step(output , t , x , generator=generator ).prev_sample
mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
encoder_continuous_inputs = mel[:1]
pred_mel = mel.cpu().float().numpy()
full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i , full_pred_mel )
logger.info("Generated segment" , i )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
else:
output = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=output )
| 370 | import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text(text : str , n : int=100 , character : str=" "):
"""Split the text every ``n``-th occurrence of ``character``."""
text = text.split(character)
return [character.join(text[i : i + n]).strip() for i in range(0 , len(text) , n)]
def split_documents(documents : dict):
"""Split documents into passages."""
titles , texts = [], []
for title, text in zip(documents["title"] , documents["text"]):
if text is not None:
for passage in split_text(text):
titles.append(title if title is not None else "")
texts.append(passage)
return {"title": titles, "text": texts}
def embed(documents : dict , ctx_encoder : DPRContextEncoder , ctx_tokenizer : DPRContextEncoderTokenizerFast):
"""Compute the DPR embeddings of document passages."""
input_ids = ctx_tokenizer(
documents["title"] , documents["text"] , truncation=True , padding="longest" , return_tensors="pt")["input_ids"]
embeddings = ctx_encoder(input_ids.to(device=device) , return_dict=True).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowercase_ ( _lowerCamelCase : "RagExampleArguments" , _lowerCamelCase : "ProcessingArguments" , _lowerCamelCase : "IndexHnswArguments" , ):
######################################
logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase__ : str = load_dataset(
"csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"])
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase__ : List[Any] = dataset.map(_lowerCamelCase , batched=_lowerCamelCase , num_proc=processing_args.num_proc)
# And compute the embeddings
lowercase__ : Optional[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=_lowerCamelCase)
lowercase__ : Any = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
lowercase__ : List[Any] = Features(
{"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}) # optional, save as float32 instead of float64 to save space
lowercase__ : List[Any] = dataset.map(
partial(_lowerCamelCase , ctx_encoder=_lowerCamelCase , ctx_tokenizer=_lowerCamelCase) , batched=_lowerCamelCase , batch_size=processing_args.batch_size , features=_lowerCamelCase , )
# And finally save your dataset
lowercase__ : Optional[int] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset")
dataset.save_to_disk(_lowerCamelCase)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase__ : Tuple = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings" , custom_index=_lowerCamelCase)
# And save the index
lowercase__ : Union[str, Any] = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss")
dataset.get_index("embeddings").save(_lowerCamelCase)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
csv_path: str = field(
default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) ,metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} ,)
question: Optional[str] = field(
default=None ,metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} ,)
rag_model_name: str = field(
default="facebook/rag-sequence-nq" ,metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} ,)
dpr_ctx_encoder_model_name: str = field(
default="facebook/dpr-ctx_encoder-multiset-base" ,metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} ,)
output_dir: Optional[str] = field(
default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) ,metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} ,)
@dataclass
class ProcessingArguments:
num_proc: Optional[int] = field(
default=None ,metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} ,)
batch_size: int = field(
default=16 ,metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} ,)
@dataclass
class IndexHnswArguments:
d: int = field(
default=768 ,metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} ,)
m: int = field(
default=128 ,metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
rag_example_args , processing_args , index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
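# A quick illustration of the chunking helper above: split_text groups the
# word list back into passages of at most n words (n=100 in main; n=3 here
# so the output is short).
# split_text("one two three four five six seven", n=3)
# -> ['one two three', 'four five six', 'seven']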
| 333 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool( PipelineTool ):
"""Transcribes audio to text with a pretrained Whisper checkpoint."""
default_checkpoint = "openai/whisper-base"
description = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
name = "transcriber"
pre_processor_class = WhisperProcessor
model_class = WhisperForConditionalGeneration
inputs = ["audio"]
outputs = ["text"]
def encode( self , audio ):
return self.pre_processor(audio , return_tensors="pt" ).input_features
def forward( self , inputs ):
return self.model.generate(inputs=inputs )
def decode( self , outputs ):
return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
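# A hedged usage sketch (assumes the transformers tools runtime and a 16 kHz
# mono waveform as a float array); PipelineTool chains the three methods
# above as encode -> forward -> decode.
# import numpy as np
# tool = SpeechToTextTool()
# waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
# print(tool(waveform))  # -> transcribed text (likely empty for silence)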
| 55 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
a_ : int = """sshleifer/student_marian_en_ro_6_1"""
a_ : str = """sshleifer/tiny-mbart"""
@require_torch
class TestTrainerExt( TestCasePlus ):
"""End-to-end tests for examples/pytorch/translation/run_translation.py."""
def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ):
output_dir = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=MBART_TINY , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
logs = TrainerState.load_from_json(os.path.join(output_dir , "trainer_state.json" ) ).log_history
if not do_eval:
return
eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
first_step_stats = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
last_step_stats = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , float )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=True )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=UpperCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=True , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=False )
@require_apex
@require_torch_gpu
def snake_case ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def snake_case ( self , experiment_id ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
lowerCamelCase_ = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
data = experiments[experiment_id]
kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
info_string = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**kwargs , extra_args_str=data["extra_args_str"] )
n_matches = len(re.findall(info_string , cl.err ) )
self.assertEqual(n_matches , data["n_matches"] )
@slow
def snake_case ( self ):
"""simple docstring"""
output_dir = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=10 , distributed=False , )
# Check metrics
logs = TrainerState.load_from_json(os.path.join(output_dir , "trainer_state.json" ) ).log_history
eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
first_step_stats = eval_metrics[0]
last_step_stats = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"] , float )
# test if do_predict saves generations and metrics
contents = os.listdir(output_dir )
contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def snake_case ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(optim: str ) -> Tuple[int, float]:
extra_args = "--skip_memory_metrics 0"
output_dir = self.run_trainer(
max_len=128 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
# Check metrics
logs = TrainerState.load_from_json(Path(output_dir , "trainer_state.json" ) ).log_history
gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
loss = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
gpu_alloc_mem_diff , expected_savings , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
gpu_total_mem_diff , expected_savings , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
loss_orig , loss_bnb , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def run_trainer( self , max_len , model_name , num_train_epochs , learning_rate = 3e-3 , optim = "adafactor" , distributed = False , extra_args_str = None , eval_steps = 0 , predict_with_generate = True , do_train = True , do_eval = True , do_predict = True , n_gpus_to_use = None , ):
data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
output_dir = self.get_auto_remove_tmp_dir()
args_train = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(num_train_epochs )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(eval_steps )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
args_eval = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(eval_steps )}
'''.split()
args_predict = "\n --do_predict\n ".split()
args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
n_gpus_to_use = get_gpu_count()
master_port = get_torch_dist_unique_port()
distributed_args = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd , env=self.get_env() )
else:
testargs = ["run_translation.py"] + args
with patch.object(sys , "argv" , testargs ):
main()
return output_dir
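# Worked form of the memory arithmetic from the comments above (plain
# Python): ~25M of the model's 54M parameters have their Adam moments
# quantized by bnb, so the optimizer state shrinks from 8 to 2 bytes per
# parameter.
quantized_params = 25_000_000
adamw_bytes_per_param = 8   # two fp32 moments
bnb_bytes_per_param = 2     # two int8 moments
savings_mib = quantized_params * (adamw_bytes_per_param - bnb_bytes_per_param) / 2**20
print(f"~{savings_mib:.0f}MiB saved")  # ~143MiB, i.e. the ~150MB the test banks on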
| 55 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
# fmt: off
vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
vocab_tokens = dict(zip(vocab , range(len(vocab))))
merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
self.special_tokens_map = {'unk_token': '<unk>'}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(vocab_tokens) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(merges))
image_processor_map = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file , 'w' , encoding='utf-8') as fp:
json.dump(image_processor_map , fp)
def get_tokenizer(self , **kwargs):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs)
def get_rust_tokenizer(self , **kwargs):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs)
def get_image_processor(self , **kwargs):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""Create a list of PIL images."""
image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_default(self):
tokenizer_slow = self.get_tokenizer()
tokenizer_fast = self.get_rust_tokenizer()
image_processor = self.get_image_processor()
processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor)
processor_slow.save_pretrained(self.tmpdirname)
processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False)
processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor)
processor_fast.save_pretrained(self.tmpdirname)
processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer)
self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor)
self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor)
def test_save_load_pretrained_additional_features(self):
processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
processor = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , CLIPImageProcessor)
def test_image_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_image_proc = image_processor(image_input , return_tensors='np')
input_processor = processor(images=image_input , return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2)
def test_tokenizer(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
input_str = 'lower newer'
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def test_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
input_str = 'lower newer'
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_tokenizer_decode(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok , decoded_processor)
def test_model_input_names(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor)
input_str = 'lower newer'
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names) | 356 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE (a__ ):
attributes = ['''image_processor''', '''tokenizer''']
image_processor_class = '''AutoImageProcessor'''
tokenizer_class = '''AutoTokenizer'''
def __init__( self , image_processor , tokenizer):
super().__init__(image_processor , tokenizer)
self.current_processor = self.image_processor
def __call__( self , text=None , images=None , return_tensors=None , **kwargs):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.')
if text is not None:
encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs)
if images is not None:
image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs)
if text is not None and images is not None:
encoding['pixel_values'] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features) , tensor_type=return_tensors)
def batch_decode( self , *args , **kwargs):
return self.tokenizer.batch_decode(*args , **kwargs)
def decode( self , *args , **kwargs):
return self.tokenizer.decode(*args , **kwargs)
@property
def model_input_names( self):
"""The input names this processor produces."""
return ["input_ids", "attention_mask", "pixel_values"] | 190 | 0 |
def solution(n: int = 4000000 ) -> int:
'''Return the sum of the even-valued Fibonacci terms not exceeding n.'''
fib = [0, 1]
i = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
total = 0
for j in range(len(fib ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 7 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class DanceDiffusionPipeline( DiffusionPipeline ):
"""Pipeline for unconditional audio generation."""
def __init__(self , unet , scheduler ) -> None:
super().__init__()
self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__(self , batch_size = 1 , num_inference_steps = 100 , generator = None , audio_length_in_s = None , return_dict = True , ) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
sample_size = audio_length_in_s * self.unet.config.sample_rate
down_scale_factor = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
F" {3 * down_scale_factor / self.unet.config.sample_rate}." )
original_sample_size = int(sample_size )
if sample_size % down_scale_factor != 0:
sample_size = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
F" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
" process." )
sample_size = int(sample_size )
dtype = next(iter(self.unet.parameters() ) ).dtype
shape = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(generator , list ) and len(generator ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(generator )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
# set step values
self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
model_output = self.unet(audio , t ).sample
# 2. compute previous audio sample: x_t -> x_t-1
audio = self.scheduler.step(model_output , t , audio ).prev_sample
audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
audio = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=audio )
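# Plain-number illustration of the padding logic above: the UNet halves the
# time axis once per up block, so the sample count is rounded up to the next
# multiple of 2**len(up_blocks) for denoising and trimmed back afterwards.
sample_rate, num_up_blocks = 16000, 4
sample_size = 1.0001 * sample_rate            # 16001.6 samples requested
factor = 2 ** num_up_blocks                   # 16
rounded = (sample_size // factor + 1) * factor
print(int(sample_size), int(rounded))         # 16001 16016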
| 153 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
"""Markov chain over a graph, with per-edge transition probabilities."""
def __init__( self ):
self.connections = {}
def add_node( self , node : str ):
self.connections[node] = {}
def add_transition_probability( self , node1 : str , node2 : str , probability : float ):
if node1 not in self.connections:
self.add_node(node1 )
if node2 not in self.connections:
self.add_node(node2 )
self.connections[node1][node2] = probability
def get_nodes( self ):
return list(self.connections )
def transition( self , node : str ):
current_probability = 0
random_value = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def get_transitions( start : str , transitions : list[tuple[str, str, float]] , steps : int ) -> dict[str, int]:
"""Run the chain for ``steps`` transitions and count the node visits."""
graph = MarkovChainGraphUndirectedUnweighted()
for node1, node2, probability in transitions:
graph.add_transition_probability(node1 , node2 , probability )
visited = Counter(graph.get_nodes() )
node = start
for _ in range(steps ):
node = graph.transition(node )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
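    # A small usage sketch: a two-node chain whose outgoing probabilities sum
    # to one per node; over many steps the visit counts approach the
    # stationary distribution (heavily favouring "a" here).
    chain = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", chain, 5_000).most_common())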
| 249 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class UnetaDModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
model_class = UNetaDModel
main_input_name = """sample"""
@property
def dummy_input( self ):
batch_size = 4
num_channels = 3
sizes = (32, 32)
noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
time_step = torch.tensor([10] ).to(torch_device )
return {"sample": noise, "timestep": time_step}
@property
def input_shape( self ):
return (3, 32, 32)
@property
def output_shape( self ):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common( self ):
init_dict = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
model_class = UNetaDModel
main_input_name = """sample"""
@property
def dummy_input( self ):
batch_size = 4
num_channels = 4
sizes = (32, 32)
noise = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
time_step = torch.tensor([10] ).to(torch_device )
return {"sample": noise, "timestep": time_step}
@property
def input_shape( self ):
return (4, 32, 32)
@property
def output_shape( self ):
return (4, 32, 32)
def prepare_init_args_and_inputs_for_common( self ):
init_dict = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_from_pretrained_hub( self ):
model , loading_info = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=True )
self.assertIsNotNone(model )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(torch_device )
image = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def test_from_pretrained_accelerate( self ):
model , _ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=True )
model.to(torch_device )
image = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def test_from_pretrained_accelerate_wont_change_results( self ):
model_accelerate , _ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=True )
model_accelerate.to(torch_device )
model_accelerate.eval()
noise = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
noise = noise.to(torch_device )
time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
arr_accelerate = model_accelerate(noise , time_step )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
model_normal_load , _ = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=True , low_cpu_mem_usage=False )
model_normal_load.to(torch_device )
model_normal_load.eval()
arr_normal_load = model_normal_load(noise , time_step )['sample']
assert torch_all_close(arr_accelerate , arr_normal_load , rtol=1E-3 )
def test_output_pretrained( self ):
model = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(torch_device )
noise = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
noise = noise.to(torch_device )
time_step = torch.tensor([10] * noise.shape[0] ).to(torch_device )
with torch.no_grad():
output = model(noise , time_step ).sample
output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-3 ) )
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetaDModel
_SCREAMING_SNAKE_CASE = """sample"""
@property
def A ( self : str , UpperCamelCase__ : Any=(3_2, 3_2) ):
"""simple docstring"""
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
UpperCamelCase = torch.tensor(batch_size * [1_0] ).to(dtype=torch.intaa , device=UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def A ( self : Optional[Any] ):
"""simple docstring"""
return (3, 3_2, 3_2)
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return (3, 3_2, 3_2)
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = {
'block_out_channels': [3_2, 6_4, 6_4, 6_4],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(UpperCamelCase__ )
UpperCamelCase = self.dummy_input
UpperCamelCase = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(UpperCamelCase__ )
UpperCamelCase = noise
UpperCamelCase = model(**UpperCamelCase__ )
assert image is not None, "Make sure output is not None"
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(UpperCamelCase__ )
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = (2_5_6, 2_5_6)
UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(UpperCamelCase__ )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ ).sample
UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-2 ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(UpperCamelCase__ )
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = (3_2, 3_2)
UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(UpperCamelCase__ )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ ).sample
UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-2 ) )
def A ( self : List[str] ):
"""simple docstring"""
pass
| 249 | 1 |
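# Illustrative sketch (not part of the dataset rows): the tests above exercise
# diffusers' `output_loading_info=True` loading pattern. A minimal, de-obfuscated
# version of that pattern, assuming the real class name UNet2DModel:
import torch
from diffusers import UNet2DModel

model, loading_info = UNet2DModel.from_pretrained(
    "fusing/unet-ldm-dummy-update", output_loading_info=True
)
assert len(loading_info["missing_keys"]) == 0  # all checkpoint weights were matched

noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
timesteps = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
    sample = model(noise, timesteps).sample
assert sample is not None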
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =42
__a =42
def _lowerCamelCase ( lowercase : str ) -> list[str]:
if not isinstance(lowercase , lowercase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(lowercase ) )]
def _lowerCamelCase ( lowercase : str ) -> BWTTransformDict:
if not isinstance(lowercase , lowercase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_a = all_rotations(lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_a = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(lowercase ),
}
return response
def _lowerCamelCase ( lowercase : str , lowercase : int ) -> str:
if not isinstance(lowercase , lowercase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_a = int(lowercase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(lowercase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_a = [""] * len(lowercase )
for _ in range(len(lowercase ) ):
for i in range(len(lowercase ) ):
_a = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCAmelCase_ : str = 'Provide a string that I will generate its BWT transform: '
lowerCAmelCase_ : List[str] = input(entry_msg).strip()
lowerCAmelCase_ : List[str] = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result['bwt_string']}'"""
)
lowerCAmelCase_ : Optional[Any] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
f"""we get original string '{original_string}'"""
)
| 63 |
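# Illustrative sketch: a round trip through the Burrows-Wheeler transform above,
# using the de-obfuscated names bwt_transform / reverse_bwt that the row's own
# __main__ block calls.
result = bwt_transform("^BANANA")
assert result["bwt_string"] == "BNN^AAA"
assert result["idx_original_string"] == 6
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"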
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __a ( __UpperCamelCase ):
__lowercase : Any = ['vqvae']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , mel=lowerCAmelCase__ , vqvae=lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , lowerCAmelCase__ ) else 1_000
@torch.no_grad()
def __call__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
lowercase__: Union[str, Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCAmelCase__ )
lowercase__: Optional[int] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowercase__: Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowercase__: List[str] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowerCAmelCase__ , device=self.device , )
lowercase__: List[Any] = noise
lowercase__: int = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: int = self.mel.audio_slice_to_image(lowerCAmelCase__ )
lowercase__: int = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
lowercase__: str = (input_image / 255) * 2 - 1
lowercase__: Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowercase__: Optional[int] = self.vqvae.encode(torch.unsqueeze(lowerCAmelCase__ , 0 ) ).latent_dist.sample(
generator=lowerCAmelCase__ )[0]
lowercase__: Dict = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowercase__: List[Any] = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , self.scheduler.timesteps[start_step - 1] )
lowercase__: str = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowercase__: Dict = int(mask_start_secs * pixels_per_second )
lowercase__: Tuple = int(mask_end_secs * pixels_per_second )
lowercase__: List[Any] = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowerCAmelCase__ ):
lowercase__: Union[str, Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )['sample']
else:
lowercase__: Optional[Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ )['sample']
if isinstance(self.scheduler , lowerCAmelCase__ ):
lowercase__: List[str] = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , )['prev_sample']
else:
lowercase__: int = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__ , )['prev_sample']
if mask is not None:
if mask_start > 0:
lowercase__: List[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
lowercase__: Optional[int] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowercase__: Union[str, Any] = 1 / self.vqvae.config.scaling_factor * images
lowercase__: Optional[Any] = self.vqvae.decode(lowerCAmelCase__ )['sample']
lowercase__: Dict = (images / 2 + 0.5).clamp(0 , 1 )
lowercase__: List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowercase__: int = (images * 255).round().astype('uint8' )
lowercase__: Optional[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowerCAmelCase__ , mode='RGB' ).convert('L' ) for _ in images) )
lowercase__: Dict = [self.mel.image_to_audio(lowerCAmelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCAmelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowerCAmelCase__ ) )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 50 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowerCAmelCase__ )
self.scheduler.set_timesteps(lowerCAmelCase__ )
lowercase__: List[str] = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
lowercase__: str = (sample / 255) * 2 - 1
lowercase__: str = torch.Tensor(lowerCAmelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowercase__: Union[str, Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowercase__: Optional[Any] = self.scheduler.alphas_cumprod[t]
lowercase__: str = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowercase__: str = 1 - alpha_prod_t
lowercase__: int = self.unet(lowerCAmelCase__ , lowerCAmelCase__ )['sample']
lowercase__: int = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowercase__: Optional[int] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowercase__: Any = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> torch.Tensor:
'''simple docstring'''
lowercase__: Any = acos(torch.dot(torch.flatten(lowerCAmelCase__ ) , torch.flatten(lowerCAmelCase__ ) ) / torch.norm(lowerCAmelCase__ ) / torch.norm(lowerCAmelCase__ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowerCAmelCase__ ) + sin(alpha * theta ) * xa / sin(lowerCAmelCase__ )
| 196 | 0 |
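# Illustrative sketch: using the pipeline above, assuming it is diffusers'
# AudioDiffusionPipeline; the checkpoint id below is an assumption.
import torch
from diffusers import AudioDiffusionPipeline

pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
output = pipe(batch_size=1, generator=torch.Generator().manual_seed(42))
image = output.images[0]     # mel spectrogram as a PIL image
audio = output.audios[0, 0]  # waveform at pipe.mel.get_sample_rate()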
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 290 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because it should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=A , )
assert hasattr(self , 'env')
def _lowerCamelCase ( self : Any , A : Tuple=1) -> List[str]:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"{self.env.base_job_name}-single" , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )
def _lowerCamelCase ( self : Dict , A : int) -> str:
"""simple docstring"""
TrainingJobAnalytics(A).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
_UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' , 99_99_99)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , 'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , A)
| 290 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
A =logging.get_logger(__name__)
A ={
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class _a ( __a ):
def __init__( self : List[Any] , lowercase : int=None , lowercase : Any=None , *lowercase : int , **lowercase : str ):
'''simple docstring'''
super().__init__(*lowercase , **lowercase )
if config is None:
assert isinstance(self.model , lowercase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
UpperCAmelCase = self.model.config
else:
UpperCAmelCase = config
UpperCAmelCase = data_args
UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , lowercase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
''' padding.''' )
if self.args.label_smoothing == 0:
UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
UpperCAmelCase = label_smoothed_nll_loss
def A ( self : Optional[Any] , lowercase : int ):
'''simple docstring'''
if self.optimizer is None:
UpperCAmelCase = ['''bias''', '''LayerNorm.weight''']
UpperCAmelCase = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
UpperCAmelCase = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
UpperCAmelCase = Adafactor
UpperCAmelCase = {'''scale_parameter''': False, '''relative_step''': False}
else:
UpperCAmelCase = AdamW
UpperCAmelCase = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
UpperCAmelCase = self.args.learning_rate
if self.sharded_ddp:
UpperCAmelCase = OSS(
params=lowercase , optim=lowercase , **lowercase , )
else:
UpperCAmelCase = optimizer_cls(lowercase , **lowercase )
if self.lr_scheduler is None:
UpperCAmelCase = self._get_lr_scheduler(lowercase )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def A ( self : Optional[Any] , lowercase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
UpperCAmelCase = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
UpperCAmelCase = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=lowercase )
return scheduler
def A ( self : Tuple ):
'''simple docstring'''
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def A ( self : int , lowercase : str , lowercase : Any , lowercase : Any ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
UpperCAmelCase = model(**lowercase , use_cache=lowercase )[0]
UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
UpperCAmelCase , UpperCAmelCase = model(**lowercase , labels=lowercase , use_cache=lowercase )[:2]
else:
# compute label smoothed loss
UpperCAmelCase = model(**lowercase , use_cache=lowercase )[0]
UpperCAmelCase = torch.nn.functional.log_softmax(lowercase , dim=-1 )
UpperCAmelCase , UpperCAmelCase = self.loss_fn(lowercase , lowercase , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def A ( self : Tuple , lowercase : Any , lowercase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = inputs.pop('''labels''' )
UpperCAmelCase , UpperCAmelCase = self._compute_loss(lowercase , lowercase , lowercase )
return loss
def A ( self : str , lowercase : nn.Module , lowercase : Dict[str, Union[torch.Tensor, Any]] , lowercase : bool , lowercase : Optional[List[str]] = None , ):
'''simple docstring'''
UpperCAmelCase = self._prepare_inputs(lowercase )
UpperCAmelCase = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
UpperCAmelCase = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **lowercase , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase = self._pad_tensors_to_max_len(lowercase , gen_kwargs['''max_length'''] )
UpperCAmelCase = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
UpperCAmelCase , UpperCAmelCase = self._compute_loss(lowercase , lowercase , lowercase )
UpperCAmelCase = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
UpperCAmelCase = self._pad_tensors_to_max_len(lowercase , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def A ( self : List[str] , lowercase : List[Any] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f" padded to `max_length`={max_length}" )
UpperCAmelCase = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
UpperCAmelCase = tensor
return padded_tensor
| 34 |
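# Illustrative sketch of constructing the trainer above. Upstream this is the
# legacy Seq2SeqTrainer from the transformers examples; `training_args` below is
# assumed to be that example's Seq2SeqTrainingArguments (which defines the
# `label_smoothing` attribute the class reads).
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
trainer = Seq2SeqTrainer(
    config=model.config,  # passing a config explicitly skips the PreTrainedModel check
    data_args=None,
    model=model,
    args=training_args,
)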
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _a ( metaclass=__a ):
__a : int = ["""flax""", """transformers"""]
def __init__( self : Optional[Any] , *lowercase : str , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : List[Any] , **lowercase : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : int ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=__a ):
__a : int = ["""flax""", """transformers"""]
def __init__( self : int , *lowercase : Tuple , **lowercase : Dict ):
'''simple docstring'''
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : List[str] , *lowercase : Optional[int] , **lowercase : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Dict , *lowercase : Union[str, Any] , **lowercase : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=__a ):
__a : int = ["""flax""", """transformers"""]
def __init__( self : Optional[int] , *lowercase : Union[str, Any] , **lowercase : Any ):
'''simple docstring'''
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : Tuple , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Any , *lowercase : Dict , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
class _a ( metaclass=__a ):
__a : Any = ["""flax""", """transformers"""]
def __init__( self : Any , *lowercase : Optional[Any] , **lowercase : Optional[int] ):
'''simple docstring'''
requires_backends(self , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Dict , *lowercase : Optional[Any] , **lowercase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
@classmethod
def A ( cls : Union[str, Any] , *lowercase : str , **lowercase : Any ):
'''simple docstring'''
requires_backends(cls , ['''flax''', '''transformers'''] )
| 34 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = """bert-generation"""
def __init__( self : Tuple , __UpperCamelCase : Dict=5_0_3_5_8 , __UpperCamelCase : Optional[int]=1_0_2_4 , __UpperCamelCase : List[Any]=2_4 , __UpperCamelCase : Dict=1_6 , __UpperCamelCase : Tuple=4_0_9_6 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : Any=5_1_2 , __UpperCamelCase : Union[str, Any]=0.0_2 , __UpperCamelCase : List[str]=1e-12 , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Tuple=1 , __UpperCamelCase : List[Any]="absolute" , __UpperCamelCase : Tuple=True , **__UpperCamelCase : Union[str, Any] , )->Any:
super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
| 326 |
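# Illustrative sketch: instantiating the configuration above (upstream:
# transformers.BertGenerationConfig) with a couple of overridden defaults.
from transformers import BertGenerationConfig

config = BertGenerationConfig(hidden_size=512, num_hidden_layers=4)
assert config.model_type == "bert-generation"
assert config.hidden_size == 512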
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=_SCREAMING_SNAKE_CASE , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=_SCREAMING_SNAKE_CASE , default=5 )
parser.add_argument('''--batch_size''' , type=_SCREAMING_SNAKE_CASE , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=_SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument('''--freeze''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
parser.add_argument('''--learning_rate''' , type=_SCREAMING_SNAKE_CASE , default=5E-4 )
parser.add_argument('''--seed''' , type=_SCREAMING_SNAKE_CASE , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=_SCREAMING_SNAKE_CASE , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=_SCREAMING_SNAKE_CASE , default=10 )
parser.add_argument('''--weight_decay''' , type=_SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument('''--output_dir''' , type=_SCREAMING_SNAKE_CASE , default='''./results''' )
return parser.parse_args()
__A : Union[str, Any] = load("accuracy")
def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = eval_pred
_UpperCAmelCase = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )
class _a ( lowerCAmelCase):
"""simple docstring"""
def __init__( self : str , __UpperCamelCase : Union[str, Any] )->None:
super().__init__()
_UpperCAmelCase = trainer
def lowercase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] )->Any:
if control.should_evaluate:
_UpperCAmelCase = deepcopy(__UpperCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = get_args()
set_seed(args.seed )
_UpperCAmelCase = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
_UpperCAmelCase = dataset.train_test_split(test_size=0.2 )
_UpperCAmelCase = train_test['''test'''].train_test_split(test_size=0.5 )
_UpperCAmelCase = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase = tokenizer.eos_token
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_UpperCAmelCase = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_UpperCAmelCase = False
_UpperCAmelCase = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(_SCREAMING_SNAKE_CASE : Any ):
_UpperCAmelCase = tokenizer(example['''src'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 )
_UpperCAmelCase = labels.straint(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_UpperCAmelCase = train_test_validation.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation['''train'''].column_names , )
_UpperCAmelCase = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
_UpperCAmelCase = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , )
print('''Training...''' )
trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) )
trainer.train()
if __name__ == "__main__":
main()
| 326 | 1 |
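# Illustrative sketch: the `labels.straint(...)` call inside the tokenize closure
# above is presumably the digit-obfuscated rendering of ClassLabel.str2int (the
# same scheme turns int64 into "intaa" elsewhere in these rows), which maps a
# complexity label string to its integer id:
from datasets import ClassLabel

labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
assert labels.str2int("linear") == 1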
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=1_3 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=9_9 , __snake_case=3_2 , __snake_case=5 , __snake_case=4 , __snake_case=3_7 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_1_2 , __snake_case=1_6 , __snake_case=2 , __snake_case=0.02 , __snake_case=4 , ):
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_attention_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_choices
def a_ ( self ):
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_attention_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a_ ( self ):
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case = config_and_inputs
snake_case = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a_ ( self ):
snake_case = FlaxAlbertModelTester(self )
@slow
def a_ ( self ):
for model_class_name in self.all_model_classes:
snake_case = model_class_name.from_pretrained('''albert-base-v2''' )
snake_case = model(np.ones((1, 1) ) )
self.assertIsNotNone(__snake_case )
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def a_ ( self ):
snake_case = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
snake_case = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
snake_case = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
snake_case = model(__snake_case , attention_mask=__snake_case )[0]
snake_case = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , __snake_case )
snake_case = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __snake_case , atol=1E-4 ) )
| 127 |
from random import shuffle
import tensorflow as tf
from numpy import array
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
snake_case = int(UpperCamelCase_ )
assert noofclusters < len(UpperCamelCase_ )
# Find out the dimensionality
snake_case = len(vectors[0] )
# Will help select random centroids from among the available vectors
snake_case = list(range(len(UpperCamelCase_ ) ) )
shuffle(UpperCamelCase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
snake_case = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
snake_case = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
snake_case = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(UpperCamelCase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
snake_case = tf.placeholder('''float64''' ,[dim] )
snake_case = []
for centroid in centroids:
cent_assigns.append(tf.assign(UpperCamelCase_ ,UpperCamelCase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
snake_case = [tf.Variable(0 ) for i in range(len(UpperCamelCase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
snake_case = tf.placeholder('''int32''' )
snake_case = []
for assignment in assignments:
cluster_assigns.append(tf.assign(UpperCamelCase_ ,UpperCamelCase_ ) )
##Now let's construct the node that will compute the mean
# The placeholder for the input
snake_case = tf.placeholder('''float''' ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
snake_case = tf.reduce_mean(UpperCamelCase_ ,0 )
##Node for computing Euclidean distances
# Placeholders for input
snake_case = tf.placeholder('''float''' ,[dim] )
snake_case = tf.placeholder('''float''' ,[dim] )
snake_case = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(UpperCamelCase_ ,UpperCamelCase_ ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
snake_case = tf.placeholder('''float''' ,[noofclusters] )
snake_case = tf.argmin(UpperCamelCase_ ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
snake_case = tf.initialize_all_variables()
# Initialize all variables
sess.run(UpperCamelCase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
snake_case = 1_00
for _ in range(UpperCamelCase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(UpperCamelCase_ ) ):
snake_case = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
snake_case = [
sess.run(UpperCamelCase_ ,feed_dict={va: vect, va: sess.run(UpperCamelCase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
snake_case = sess.run(
UpperCamelCase_ ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(UpperCamelCase_ ):
# Collect all the vectors assigned to this cluster
snake_case = [
vectors[i]
for i in range(len(UpperCamelCase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
snake_case = sess.run(
UpperCamelCase_ ,feed_dict={mean_input: array(UpperCamelCase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
snake_case = sess.run(UpperCamelCase_ )
snake_case = sess.run(UpperCamelCase_ )
return centroids, assignments
| 127 | 1 |
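# Illustrative sketch: calling the TF1-style k-means routine above (its name is
# obfuscated in this row; TFKMeansCluster is the presumed upstream name). Note it
# needs TensorFlow 1.x APIs such as tf.Session and tf.placeholder.
import numpy as np

vectors = np.array([[0.0, 0.0], [0.1, 0.1], [5.0, 5.0], [5.1, 4.9]])
centroids, assignments = TFKMeansCluster(vectors, 2)
# Expect the first two and the last two points to share a cluster id each.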
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCamelCase_ (__A ):
def __lt__( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
return self[-1] < other[-1]
def __eq__( self : int , lowerCAmelCase_ : List[str] ) -> int:
return self[-1] == other[-1]
def snake_case ( A__ ):
UpperCAmelCase_ : list[Stack] = []
# sort into stacks
for element in collection:
UpperCAmelCase_ : str = Stack([element] )
UpperCAmelCase_ : Union[str, Any] = bisect_left(A__ ,A__ )
if i != len(A__ ):
stacks[i].append(A__ )
else:
stacks.append(A__ )
# use a heap-based merge to combine the stacks efficiently, writing in place
collection[:] = merge(*(reversed(stack ) for stack in stacks) )
return collection
if __name__ == "__main__":
lowerCamelCase_ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase_ = [int(item) for item in user_input.split(''',''')]
print(patience_sort(unsorted))
| 253 |
"""simple docstring"""
def snake_case ( A__ ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(A__ ) )
if txt[a].isalpha()
]
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 253 | 1 |
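# Illustrative sketch: the helper above returns the "wave" of a string, uppercasing
# one alphabetic character at a time while leaving non-alphabetic positions alone.
# (The function name is obfuscated to `snake_case` in this row, so the calls below
# reuse that name as defined.)
assert snake_case("abc") == ["Abc", "aBc", "abC"]
assert snake_case("a b") == ["A b", "a B"]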
from __future__ import annotations
from collections.abc import MutableSequence
class A :
'''simple docstring'''
def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : MutableSequence[float] ) -> None:
"""simple docstring"""
if len(__lowerCAmelCase ) != degree + 1:
raise ValueError(
"""The number of coefficients should be equal to the degree + 1.""" )
A__ = list(__lowerCAmelCase )
A__ = degree
def __add__( self : Dict , __lowerCAmelCase : Polynomial ) -> Polynomial:
"""simple docstring"""
if self.degree > polynomial_a.degree:
A__ = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , __lowerCAmelCase )
else:
A__ = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , __lowerCAmelCase )
def __sub__( self : str , __lowerCAmelCase : Polynomial ) -> Polynomial:
"""simple docstring"""
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : int ) -> Polynomial:
"""simple docstring"""
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Optional[Any] , __lowerCAmelCase : Polynomial ) -> Polynomial:
"""simple docstring"""
A__ = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , __lowerCAmelCase )
def a_ ( self : Union[str, Any] , __lowerCAmelCase : int | float ) -> int | float:
"""simple docstring"""
A__ = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Optional[int] ) -> str:
"""simple docstring"""
A__ = """"""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(__lowerCAmelCase )
return polynomial
def __repr__( self : Any ) -> str:
"""simple docstring"""
return self.__str__()
def a_ ( self : Any ) -> Polynomial:
"""simple docstring"""
A__ = [0] * self.degree
for i in range(self.degree ):
A__ = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , __lowerCAmelCase )
def a_ ( self : Tuple , __lowerCAmelCase : int | float = 0 ) -> Polynomial:
"""simple docstring"""
A__ = [0] * (self.degree + 2)
A__ = constant
for i in range(self.degree + 1 ):
A__ = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , __lowerCAmelCase )
def __eq__( self : Optional[Any] , __lowerCAmelCase : object ) -> bool:
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Any , __lowerCAmelCase : object ) -> bool:
"""simple docstring"""
return not self.__eq__(__lowerCAmelCase )
| 274 |
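# Illustrative sketch: exercising the Polynomial class above. Coefficients run from
# the constant term upward; the method names evaluate/derivative follow the upstream
# source (in this row they are all obfuscated to `a_`, so these names are assumptions).
p = Polynomial(2, [1, 0, 3])   # 3x^2 + 1
q = Polynomial(1, [0, 2])      # 2x
assert (p + q).evaluate(2) == 17  # 3*4 + 2*2 + 1
assert str(p.derivative()) == "6x"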
from collections import deque
class A :
'''simple docstring'''
def __init__( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> None:
"""simple docstring"""
A__ = process_name # process name
A__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
A__ = arrival_time
A__ = burst_time # remaining burst time
A__ = 0 # total time of the process wait in ready queue
A__ = 0 # time from arrival time to completion time
class A :
'''simple docstring'''
def __init__( self : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : list[int] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int , ) -> None:
"""simple docstring"""
A__ = number_of_queues
# time slice of queues that round robin algorithm applied
A__ = time_slices
# unfinished process is in this ready_queue
A__ = queue
# current time
A__ = current_time
# finished process is in this sequence queue
A__ = deque()
def a_ ( self : Dict ) -> list[str]:
"""simple docstring"""
A__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def a_ ( self : Tuple , __lowerCAmelCase : list[Process] ) -> list[int]:
"""simple docstring"""
A__ = []
for i in range(len(__lowerCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def a_ ( self : Optional[Any] , __lowerCAmelCase : list[Process] ) -> list[int]:
"""simple docstring"""
A__ = []
for i in range(len(__lowerCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def a_ ( self : Dict , __lowerCAmelCase : list[Process] ) -> list[int]:
"""simple docstring"""
A__ = []
for i in range(len(__lowerCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def a_ ( self : int , __lowerCAmelCase : deque[Process] ) -> list[int]:
"""simple docstring"""
return [q.burst_time for q in queue]
def a_ ( self : Any , __lowerCAmelCase : Process ) -> int:
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def a_ ( self : Union[str, Any] , __lowerCAmelCase : deque[Process] ) -> deque[Process]:
"""simple docstring"""
A__ = deque() # sequence deque of finished process
while len(__lowerCAmelCase ) != 0:
A__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(__lowerCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
A__ = 0
# set the process's turnaround time because it is finished
A__ = self.current_time - cp.arrival_time
# set the completion time
A__ = self.current_time
# add the process to queue that has finished queue
finished.append(__lowerCAmelCase )
self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def a_ ( self : Optional[Any] , __lowerCAmelCase : deque[Process] , __lowerCAmelCase : int ) -> tuple[deque[Process], deque[Process]]:
"""simple docstring"""
A__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(__lowerCAmelCase ) ):
A__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(__lowerCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
A__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(__lowerCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
A__ = 0
# set the finish time
A__ = self.current_time
# update the process' turnaround time because it is finished
A__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(__lowerCAmelCase )
self.finish_queue.extend(__lowerCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def a_ ( self : List[Any] ) -> deque[Process]:
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
A__ , A__ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
A : Union[str, Any] = Process('''P1''', 0, 5_3)
A : Optional[Any] = Process('''P2''', 0, 1_7)
A : Optional[int] = Process('''P3''', 0, 6_8)
A : int = Process('''P4''', 0, 2_4)
A : Any = 3
A : List[Any] = [1_7, 2_5]
A : Optional[Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
A : Optional[Any] = Process('''P1''', 0, 5_3)
A : int = Process('''P2''', 0, 1_7)
A : Optional[int] = Process('''P3''', 0, 6_8)
A : Tuple = Process('''P4''', 0, 2_4)
A : Union[str, Any] = 3
A : Optional[Any] = [1_7, 2_5]
A : Tuple = deque([Pa, Pa, Pa, Pa])
A : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0)
A : Dict = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 274 | 1 |
"""simple docstring"""
_a : List[str] = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
_a : Any = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : float ,_lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
_lowerCAmelCase : Optional[Any] = from_type.lower().strip("""s""" )
_lowerCAmelCase : List[str] = to_type.lower().strip("""s""" )
_lowerCAmelCase : List[Any] = UNIT_SYMBOL.get(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = UNIT_SYMBOL.get(_lowerCamelCase ,_lowerCamelCase )
if from_sanitized not in METRIC_CONVERSION:
_lowerCAmelCase : List[Any] = (
f"Invalid \'from_type\' value: {from_type!r}.\n"
f"Conversion abbreviations are: {', '.join(_lowerCamelCase )}"
)
raise ValueError(_lowerCamelCase )
if to_sanitized not in METRIC_CONVERSION:
_lowerCAmelCase : Dict = (
f"Invalid \'to_type\' value: {to_type!r}.\n"
f"Conversion abbreviations are: {', '.join(_lowerCamelCase )}"
)
raise ValueError(_lowerCamelCase )
_lowerCAmelCase : Dict = METRIC_CONVERSION[from_sanitized]
_lowerCAmelCase : List[Any] = METRIC_CONVERSION[to_sanitized]
_lowerCAmelCase : Any = 1
if from_exponent > to_exponent:
_lowerCAmelCase : Tuple = from_exponent - to_exponent
else:
_lowerCAmelCase : List[str] = -(to_exponent - from_exponent)
return value * pow(10 ,_lowerCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
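# Illustrative sketch: the converter above (its name is obfuscated in this row;
# length_conversion is the presumed upstream name) normalizes a trailing "s" and
# letter case before looking units up.
assert length_conversion(1, "kilometer", "meter") == 1000
assert length_conversion(4, "meters", "kilometers") == 0.004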
| 364 | """simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
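# The DeepSpeed wrappers below are only re-exported when the optional
# deepspeed dependency is installed.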
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 126 | 0 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 1_28,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
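# Every entry above deliberately differs from the corresponding PretrainedConfig
# default, so the round-trip tests below can detect values that get dropped.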
@is_staging_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase__ ( cls : Tuple ):
"""simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def UpperCamelCase__ ( cls : str ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
        config = BertConfig(
            vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
        config.push_to_hub("""test-config""" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F"{USER}/test-config" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-config""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id="""test-config""" , push_to_hub=True , use_auth_token=self._token )
            new_config = BertConfig.from_pretrained(F"{USER}/test-config" )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v , getattr(new_config , k ) )
def UpperCamelCase__ ( self : Dict ):
"""simple docstring"""
        config = BertConfig(
            vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
        config.push_to_hub("""valid_org/test-config-org""" , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained("""valid_org/test-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-config-org""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="""valid_org/test-config-org""" , push_to_hub=True , use_auth_token=self._token )
            new_config = BertConfig.from_pretrained("""valid_org/test-config-org""" )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v , getattr(new_config , k ) )
def UpperCamelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=4_2 )
        config.push_to_hub("""test-dynamic-config""" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"""AutoConfig""": """custom_configuration.CustomConfig"""} )
        new_config = AutoConfig.from_pretrained(F"{USER}/test-dynamic-config" , trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , """CustomConfig""" )
        self.assertEqual(new_config.attribute , 4_2 )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self : int ):
"""simple docstring"""
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + """foo"""  # str
        c.update_from_string(
            F"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
        self.assertEqual(n_embd , c.n_embd , """mismatch for key: n_embd""" )
        self.assertEqual(resid_pdrop , c.resid_pdrop , """mismatch for key: resid_pdrop""" )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , """mismatch for key: scale_attn_weights""" )
        self.assertEqual(summary_type , c.summary_type , """mismatch for key: summary_type""" )
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                """The following keys are set with the default values in"""
                """ `test_configuration_common.config_common_kwargs` pick another value for them:"""
                F" {', '.join(keys_with_defaults )}." )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            config = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" , subfolder="""bert""" )
        self.assertIsNotNone(config )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""" , return_value=response_mock ) as mock_head:
            config = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
            # This checks that we did call the fake head request
            mock_head.assert_called()
def UpperCamelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def UpperCamelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = AutoConfig.from_pretrained("""bert-base-cased""" )
__SCREAMING_SNAKE_CASE : List[str] = ["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = 2
json.dump(configuration.to_dict() , open(os.path.join(lowercase__ , """config.4.0.0.json""" ) , """w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(lowercase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__SCREAMING_SNAKE_CASE : List[Any] = ["""config.42.0.0.json"""]
__SCREAMING_SNAKE_CASE : Union[str, Any] = 7_6_8
configuration.save_pretrained(lowercase__ )
shutil.move(os.path.join(lowercase__ , """config.4.0.0.json""" ) , os.path.join(lowercase__ , """config.42.0.0.json""" ) )
__SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained(lowercase__ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCamelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = """hf-internal-testing/test-two-configs"""
import transformers as new_transformers
__SCREAMING_SNAKE_CASE : Dict = """v4.0.0"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = new_transformers.models.auto.AutoConfig.from_pretrained(
lowercase__ , return_unused_kwargs=lowercase__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowercase__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__SCREAMING_SNAKE_CASE : Any = """v3.0.0"""
__SCREAMING_SNAKE_CASE : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowercase__ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 ) | 112 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class A_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None
    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ) -> Dict:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ''' pretokenized inputs.''' )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
                ''' pretokenized inputs.''' )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids(self , conversation ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
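# A minimal usage sketch of the tokenizer above (hypothetical; requires network
# access to the Hub, and upstream this class is named BloomTokenizerFast):
# tok = A_.from_pretrained("bigscience/tokenizer")
# tok("Hello world")["input_ids"]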
| 333 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = '''pytorch_model.bin'''
@dataclasses.dataclass
class STModelArguments:
    """simple docstring"""
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class STDataArguments:
    """simple docstring"""
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
    eval_file: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "A csv or a json file containing the validation data."} )
    task_name: Optional[str] = dataclasses.field(
        default=None , metadata={"help": "The name of the task to train on."} , )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class STTrainingArguments:
    """simple docstring"""
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no" , metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]"
        } , )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        } , )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100 , metadata={"help": "Maximum number of self-training iterations."} , )
    seed: Optional[int] = dataclasses.field(
        default=None , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data( args , infer_input , infer_output , eval_result , idalabel , next_data_dir ):
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example : example["probability"] > args.confidence_threshold )
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort('''probability''' , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )
    dataset = dataset.remove_columns(['''label''', '''probability'''] )
    dataset = dataset.rename_column('''prediction''' , '''label''' )
    dataset = dataset.map(lambda example : {"label": idalabel[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )
    pseudo_labeled_data_file = os.path.join(next_data_dir , f'train_pseudo.{args.data_file_extension}' )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def selftrain( model_name_or_path , train_file , infer_file , output_dir , **kwargs ):
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.info(accelerator.state )
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path )
    data_args = STDataArguments(train_file=train_file , infer_file=infer_file )
    training_args = STTrainingArguments(output_dir=output_dir )
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class ).items():
            setattr(args , key , value )
    for key, value in kwargs.items():
        if hasattr(args , key ):
            setattr(args , key , value )
    # Sanity checks
    data_files = {}
    args.data_file_extension = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files['''train'''] = args.train_file
    data_files['''infer'''] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files['''eval'''] = args.eval_file
    for key in data_files:
        extension = data_files[key].split('''.''' )[-1]
        assert extension in ["csv", "json"], f'`{key}_file` should be a csv or a json file.'
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f'`{key}_file` should be a {args.data_file_extension} file`.'
    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed )
    logger.info('''Creating the initial data directory for self-training...''' )
    data_dir_format = f'{args.output_dir}/self-train_iter-{{}}'.format
    initial_data_dir = data_dir_format(0 )
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir , exist_ok=True )
        os.makedirs(initial_data_dir , exist_ok=True )
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
    # Self-train
    for iteration in range(0 , int(args.max_selftrain_iterations ) ):
        current_data_dir = data_dir_format(iteration )
        assert os.path.exists(current_data_dir )
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir , '''stage-1''' )
        arguments_dict = {
            '''accelerator''': accelerator,
            '''model_name_or_path''': args.model_name_or_path,
            '''cache_dir''': args.cache_dir,
            '''do_train''': True,
            '''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
            '''do_eval''': True if args.eval_file is not None else False,
            '''eval_file''': data_files['''eval'''],
            '''do_predict''': True,
            '''infer_file''': data_files['''infer'''],
            '''task_name''': args.task_name,
            '''label_list''': args.label_list,
            '''output_dir''': current_output_dir,
            '''eval_metric''': args.eval_metric,
            '''evaluation_strategy''': args.evaluation_strategy,
            '''early_stopping_patience''': args.early_stopping_patience,
            '''early_stopping_threshold''': args.early_stopping_threshold,
            '''seed''': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args , key ):
                arguments_dict.update({key: value} )
        model_bin_file_path = os.path.join(current_output_dir , '''best-checkpoint''' , MODEL_BIN_FILE )
        if os.path.exists(model_bin_file_path ):
            logger.info(
                '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , model_bin_file_path , iteration , )
        else:
            logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , iteration )
            finetune(**arguments_dict )
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path )
            logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , iteration )
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir , '''best-checkpoint''' )
            current_output_dir = os.path.join(current_data_dir , '''stage-2''' )
            # Update arguments_dict
            arguments_dict['''model_name_or_path'''] = model_path
            arguments_dict['''train_file'''] = data_files['''train''']
            arguments_dict['''output_dir'''] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir , '''best-checkpoint''' , MODEL_BIN_FILE )
            if os.path.exists(model_bin_file_path ):
                logger.info(
                    '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , model_bin_file_path , iteration , )
            else:
                logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , iteration )
                finetune(**arguments_dict )
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path )
                logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , iteration )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1 )
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir , '''best-checkpoint''' ) )
        idalabel = config.idalabel
        eval_results_file = os.path.join(current_output_dir , '''eval_results_best-checkpoint.json''' )
        test_results_file = os.path.join(current_output_dir , '''test_results_best-checkpoint.json''' )
        assert os.path.exists(eval_results_file )
        with open(eval_results_file , '''r''' ) as f:
            eval_result = float(json.load(f )[args.eval_metric] )
        infer_output_file = os.path.join(current_output_dir , '''infer_output_best-checkpoint.csv''' )
        assert os.path.exists(infer_output_file )
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
        infer_output = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
        if accelerator.is_main_process:
            os.makedirs(next_data_dir , exist_ok=True )
            shutil.copy(eval_results_file , os.path.join(output_dir , f'eval_results_iter-{iteration}.json' ) )
            if os.path.exists(test_results_file ):
                shutil.copy(test_results_file , os.path.join(output_dir , f'test_results_iter-{iteration}.json' ) )
            create_pseudo_labeled_data(args , infer_input , infer_output , eval_result , idalabel , next_data_dir )
        accelerator.wait_for_everyone()
        data_files['''train_pseudo'''] = os.path.join(next_data_dir , f'train_pseudo.{args.data_file_extension}' )
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
        progress_bar.update(1 )
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info('''Best iteration: %d''' , best_iteration )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , best_eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , f'eval_results_iter-{iteration}.json' ) , os.path.join(output_dir , '''eval_results_best-iteration.json''' ) , )
    else:
        # Assume that the last iteration is the best
        logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
        logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , new_eval_result )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir , f'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(output_dir , '''eval_results_best-iteration.json''' ) , )
| 371 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/reformer-crime-and-punishment''': 5_2_4_2_8_8,
}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self ,vocab_file ,eos_token="</s>" ,unk_token="<unk>" ,additional_special_tokens=[] ,sp_model_kwargs : Optional[Dict[str, Any]] = None ,**kwargs ,):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token ,unk_token=unk_token ,additional_special_tokens=additional_special_tokens ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size()
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self ,d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,'''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self ,text : str ):
        return self.sp_model.encode(text ,out_type=str )
    def _convert_token_to_id( self ,token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self ,index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self ,tokens ):
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
    def save_vocabulary( self ,save_directory : str ,filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
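# A hypothetical usage sketch of the tokenizer above (downloads the
# sentencepiece model from the Hub; upstream this class is ReformerTokenizer):
# tok = SCREAMING_SNAKE_CASE.from_pretrained("google/reformer-crime-and-punishment")
# tok.tokenize("Hello world")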
| 74 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
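# MAPPING translates fairseq parameter names to HF module paths; a '*' in a
# mapped key stands for the encoder layer index and is filled in during
# conversion. Keys listed in TOP_LEVEL_KEYS are attached to the model root
# rather than being nested under the base-model prefix.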
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "inv_freq" in name:
                        weight_type = '''inv_freq'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act='''swish''' )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = '''rotary'''
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
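    # Example invocation (hypothetical script name and paths):
    #   python convert_wav2vec2_conformer.py --checkpoint_path ./ckpt.pt \
    #       --pytorch_dump_folder_path ./hf_model --not_finetuned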
| 34 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
api = HfApi()
results = {}
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
        print(F'Started running {mod.modelId}!!!')
        if mod.modelId.startswith('''CompVis'''):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'{mod.modelId} has passed successfully!!!') | 282 | 0 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list) -> float:
    """simple docstring"""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """simple docstring"""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method='''nm''')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """simple docstring"""
    regressor = SVR(kernel='''rbf''', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """simple docstring"""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
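# Majority vote over the individual forecasts: a reading counts as "safe" when
# most predictions land within 0.1 of it on the normalized scale.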
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """simple docstring"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
UpperCAmelCase = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
UpperCAmelCase = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
UpperCAmelCase = Normalizer().fit_transform(data_input_df.values)
# split data
UpperCAmelCase = normalize_df[:, 2].tolist()
UpperCAmelCase = normalize_df[:, 0].tolist()
UpperCAmelCase = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
UpperCAmelCase = normalize_df[:, [1, 2]].tolist()
UpperCAmelCase = x[: len(x) - 1]
UpperCAmelCase = x[len(x) - 1 :]
# for linear regression & sarimax
UpperCAmelCase = total_date[: len(total_date) - 1]
UpperCAmelCase = total_user[: len(total_user) - 1]
UpperCAmelCase = total_match[: len(total_match) - 1]
UpperCAmelCase = total_date[len(total_date) - 1 :]
UpperCAmelCase = total_user[len(total_user) - 1 :]
UpperCAmelCase = total_match[len(total_match) - 1 :]
# voting system with forecasting
UpperCAmelCase = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
UpperCAmelCase = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""") | 267 |
def catalan_number(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 267 | 1 |
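    # Quick sanity checks: the function returns the (n-1)-th Catalan number,
    # and the sequence starts 1, 1, 2, 5, 14, ...
    assert catalan_number(1) == 1
    assert catalan_number(5) == 14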
def base16_encode(data: bytes) -> str:
    """simple docstring"""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    """simple docstring"""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
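    # Round-trip sanity check of the two helpers above:
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"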
| 142 |
from ..utils import DummyObject, requires_backends
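# Placeholder emitted when optional backends are missing: any attempt to build
# the real class raises a clear error via `requires_backends` instead of an
# obscure ImportError at import time.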
class __SCREAMING_SNAKE_CASE ( metaclass=DummyObject ):
    _backends = ["transformers", "torch", "note_seq"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 142 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
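# The conversion below grows the word-embedding matrix by two rows (for the new
# <ent>/<ent2> special tokens) and the entity-embedding matrix by one row (for
# the extra [MASK2] entity).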
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
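    # Example invocation (every path below is a placeholder for illustration,
    # not taken from the source):
    # python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
    #     --checkpoint_path ./mluke/pytorch_model.bin \
    #     --metadata_path ./mluke/metadata.json \
    #     --entity_vocab_path ./mluke/entity_vocab.jsonl \
    #     --pytorch_dump_folder_path ./mluke-base-converted \
    #     --model_size base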
| 341 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 341 | 1 |
"""
Dynamic-programming solutions to the rod-cutting problem: given a rod of
length ``n`` and a table of piece prices, find the maximum obtainable revenue.
"""


def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion without memoization - exponential time."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic programming - O(n^2) time."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic programming - O(n^2) time."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0
    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i
    return max_rev[n]


def _enforce_args(n: int, prices: list):
    """Validate that ``n`` is non-negative and covered by the price table."""
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)
    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main() | 206 |
"""Capitalize the first character of a sentence without using ``str.upper``."""
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Return ``sentence`` with its first character upper-cased when it is a
    lowercase ASCII letter; any other first character is left untouched."""
    if not sentence:
        return ""
    # Map each lowercase ASCII letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
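# Examples added for this rewrite:
assert capitalize("hello world") == "Hello world"
assert capitalize("123 hello") == "123 hello"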
if __name__ == "__main__":
from doctest import testmod
testmod() | 206 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_only_pretrain_model(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
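# Standalone usage sketch distilled from the tests above (the model id and the
# tiny sizes are the same values the tests use; run outside of unittest):
# args = TensorFlowBenchmarkArguments(
#     models=["sshleifer/tiny-gpt2"], inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
# )
# results = TensorFlowBenchmark(args).run()
# results.time_inference_result and results.memory_inference_result hold the
# nested {model: {"bs": ..., "ss": ..., "result": ...}} dicts checked above.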
| 175 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}


def sanity_checks(args):
    """A bunch of sanity checks on the argument combinations before training starts."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence."
    )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa)."
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag."
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction."
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)."
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution."
    )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only."
    )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only."
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true."
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches."
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit"
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
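# Example invocation (paths, file names, and hyper-parameter values below are
# illustrative placeholders, not taken from the source):
# python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#     --dump_path serialization_dir/my_first_distillation \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle --force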
| 175 | 1 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(self, vocab_size=21128, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, max_relative_position=64, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout=0.1, pad_token_id=0, bos_token_id=2, eos_token_id=3, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
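# Quick usage sketch (the values are simply the defaults declared above):
# config = NezhaConfig()
# config.hidden_size            -> 768
# config.max_relative_position  -> 64, the span of Nezha's functional relative
#                                  position encoding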
| 240 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, must be in [0.04, 0.06]
        window_size : size of the neighbourhood considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 273 | 0 |
import os
from collections.abc import Iterator
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = "." ):
for dir_path, dir_names, filenames in os.walk(SCREAMING_SNAKE_CASE_ ):
lowercase__ = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(SCREAMING_SNAKE_CASE_ )[1] in (".py", ".ipynb"):
yield os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).lstrip("./" )
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return f'''{i * " "}*''' if i else "\n##"
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(SCREAMING_SNAKE_CASE_ ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(SCREAMING_SNAKE_CASE_ )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = "." ):
lowercase__ = ""
for filepath in sorted(good_file_paths(SCREAMING_SNAKE_CASE_ ) ):
lowercase__ , lowercase__ = os.path.split(SCREAMING_SNAKE_CASE_ )
if filepath != old_path:
lowercase__ = print_path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase__ = f'''{filepath}/{filename}'''.replace(" " , "%20" )
lowercase__ = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(f'''{md_prefix(SCREAMING_SNAKE_CASE_ )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(""".""")
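# Output shape sketch (illustrative): md_prefix(0) starts a new "##" heading,
# while md_prefix(2) yields "    *", a bullet indented two levels, so a file at
# maths/series/foo.py renders as a nested Markdown list entry under its folders.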
| 367 |
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """Return True when every element of ``collection`` appears exactly once."""
    return len(set(collection)) == len(collection)
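# Examples added for this rewrite:
assert all_unique([1, 2, 3]) is True
assert all_unique([1, 2, 2]) is False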
if __name__ == "__main__":
import doctest
doctest.testmod()
| 224 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after DetaImageProcessor resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
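# Sizing rule illustrated (numbers are illustrative, not taken from the test
# data): with size={"shortest_edge": 18}, a 30(h) x 60(w) input keeps its
# aspect ratio and resizes to 18 x 36 - the shorter side is pinned to 18.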
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 253 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the HumanEval prompts, yielding each task n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True when every generated sequence contains one of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing one of the EOF_STRINGS markers."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple candidate completions for each HumanEval task."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ['HF_ALLOW_CODE_EVAL'] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ['TOKENIZERS_PARALLELISM'] = 'false'
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        'do_sample': args.do_sample,
        'temperature': args.temperature,
        'max_new_tokens': args.max_new_tokens,
        'top_p': args.top_p,
        'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('openai_humaneval')
    code_eval_metric = load_metric('code_eval')
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['test'])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['test'], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''], predictions=[['']])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            ' flag to enable code evaluation.')
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs)
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval['test'][task]['test']
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append('\n' + test_func + '\n' + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, 'w') as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
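# A hypothetical launch command (the script name, checkpoint and flag values below
# are illustrative, not taken from this file):
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot-small \
#       --do_sample True --temperature 0.2 --top_p 0.95 \
#       --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1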
| 253 | 1 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCAmelCase : Dict = datasets.logging.get_logger(__name__)
lowerCAmelCase : Any = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
lowerCAmelCase : int = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
lowerCAmelCase : List[Any] = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc='dummy_doc'):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}')
        logger.info(
            'Number of resulting singleton clusters in the key '
            f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}')
    if not keep_singletons:
        logger.info(
            f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            'files, respectively')
    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ['muc', 'bcub', 'ceafe']:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': f1})
        logger.info(
            name.ljust(10), f'Recall: {recall * 100:.2f}', f' Precision: {precision * 100:.2f}', f' F1: {f1 * 100:.2f}', )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f'CoNLL score: {conll:.2f}')
        output_scores.update({'conll_score': conll})
    return output_scores
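# Sanity check of the CoNLL average computed above (numbers are illustrative):
# with MUC F1 = 0.70, B-cubed F1 = 0.60 and CEAFe F1 = 0.50,
# conll = (0.70 + 0.60 + 0.50) / 3 * 100 = 60.0.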
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#'):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == '-':
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """Compute the CoVal coreference metrics for the given predictions and references."""
        metrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 366 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = False, False, False
@dataclass
class Audio:
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = True
__UpperCamelCase = True
__UpperCamelCase = None
# Automatically constructed
__UpperCamelCase = "dict"
__UpperCamelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
__UpperCamelCase = field(default="Audio" , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self ):
"""simple docstring"""
return self.pa_type
    def encode_example(self, value):
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowerCamelCase = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767
else:
lowerCamelCase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767
lowerCamelCase = BytesIO(bytes() )
sf.write(_a , _a , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
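    # Illustrative round-trip through encode_example (the `waveform` variable below
    # is hypothetical):
    #   feature = Audio(sampling_rate=16_000)
    #   encoded = feature.encode_example({"array": waveform, "sampling_rate": 16_000})
    # after which encoded["bytes"] holds WAV-encoded audio and encoded["path"] is None.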
    def decode_example(self, value, token_per_repo_id=None):
"""simple docstring"""
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
    def cast_storage(self, storage):
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage):
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 168 | 0 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = 'sample'
    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {'sample': image}
    @property
    def input_shape(self):
        return (3, 32, 32)
    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass
    def test_training(self):
        pass
    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained('fusing/vqgan-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, 'Make sure output is not None'
    def test_output_pretrained(self):
        model = VQModel.from_pretrained('fusing/vqgan-dummy')
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1E-3))
| 96 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(model_type, generator_name_or_path: str, question_encoder_name_or_path: str, dest_dir: Path, config_name_or_path: str = None, generator_tokenizer_name_or_path: str = None, question_encoder_tokenizer_name_or_path: str = None, ):
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
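# A hypothetical invocation of this script (the script name and model identifiers
# below are illustrative; the flags are defined by the argument parser that follows):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-checkpoint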
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
    )
| 126 | 0 |
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans('', '', string.punctuation)).replace('\n', '')
    tokenize_document = document_without_punctuation.split(' ')  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans('', '', string.punctuation))  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split('\n')
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return the logarithmically scaled inverse document frequency log10(n / df), optionally smoothed."""
    if smoothing:
        if n == 0:
            raise ValueError('log10(0) is undefined.')
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError('df must be > 0')
    elif n == 0:
        raise ValueError('log10(0) is undefined.')
    return round(log10(n / df), 3)
def tf_idf(tf: int, idf: float) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
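# Worked example (the corpus and term are illustrative): a newline-separated corpus
# of three documents, two of which contain the term 'cat'.
if __name__ == '__main__':
    example_corpus = 'the cat sat\nthe dog ran\na cat slept'
    tf = term_frequency('cat', 'the cat sat')  # -> 1
    df, n = document_frequency('cat', example_corpus)  # -> (2, 3)
    idf = inverse_document_frequency(df, n)  # -> round(log10(3 / 2), 3) = 0.176
    print(tf_idf(tf, idf))  # -> 0.176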
| 363 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_UpperCamelCase : int = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """Constructs an M-CTC-T feature extractor that computes MFSC (mel-frequency spectral coefficient) features."""
    model_input_names = ['input_features', 'attention_mask']
    def __init__(self, feature_size=80, sampling_rate=16_000, padding_value=0.0, hop_length=10, win_length=25, win_function='hamming_window', frame_signal_scale=32_768.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs, ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.array):
        """Extract MFSC features for one waveform vector (unbatched)."""
        if self.win_function == 'hamming_window':
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel='log', )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, sampling_rate=None, **kwargs, ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
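# Minimal usage sketch (the zero waveform and 16 kHz rate below are illustrative):
#   import numpy as np
#   extractor = MCTCTFeatureExtractor()
#   batch = extractor(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000,
#                     padding=True, return_tensors='np')
# batch['input_features'] then has shape (1, num_frames, feature_size).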
| 186 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
lowercase_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        }, )
    def to_dict(self):
        """Serializes this instance, converting any `GenerationConfig` value to a plain dict so the result is JSON-serializable."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
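# Illustrative effect of the to_dict() override above: if `generation_config` holds a
# GenerationConfig instance, it is itself converted to a plain dict, so the whole
# arguments object stays JSON-serializable, e.g. json.dumps(args.to_dict()).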
| 266 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ):
A = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = 'sgugger/tiny-distilbert-classification'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,only_pretrain_model=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,torchscript=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' ,'Cant do half precision' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,fpaa=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
# set architectures equal to `None`
A = None
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' ,'Can\'t do half precision' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = 'sshleifer/tiny-gpt2'
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,fpaa=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = 'sshleifer/tinier_bart'
A = AutoConfig.from_pretrained(A_ )
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ ,configs=[config] )
A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,save_to_csv=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(A_ ,'inf_time.csv' ) ,train_memory_csv_file=os.path.join(A_ ,'train_mem.csv' ) ,inference_memory_csv_file=os.path.join(A_ ,'inf_mem.csv' ) ,train_time_csv_file=os.path.join(A_ ,'train_time.csv' ) ,env_info_csv_file=os.path.join(A_ ,'env.csv' ) ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
benchmark.run()
self.assertTrue(Path(os.path.join(A_ ,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(A_ ,'env.csv' ) ).exists() )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
A = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(A_ : Optional[int] ):
self.assertTrue(hasattr(A_ ,'sequential' ) )
self.assertTrue(hasattr(A_ ,'cumulative' ) )
self.assertTrue(hasattr(A_ ,'current' ) )
self.assertTrue(hasattr(A_ ,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A = PyTorchBenchmarkArguments(
models=[MODEL_ID] ,training=A_ ,inference=A_ ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(A_ ,'log.txt' ) ,log_print=A_ ,trace_memory_line_by_line=A_ ,multi_process=A_ ,)
A = PyTorchBenchmark(A_ )
A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(tmp_dir, 'log.txt')).exists())
| 74 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
"""simple docstring"""
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset: Optional[Dataset] = None, eval_examples=None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = 'eval', **gen_kwargs, ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs['max_length'] = (
            gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length
        )
        gen_kwargs['num_beams'] = (
            gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = 'test', **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, 'predict')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
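# Usage sketch (the construction below is illustrative): with a post_process_function
# that maps raw generations back to answer strings, evaluation reports both speed and
# task metrics:
#   trainer = QuestionAnsweringSeq2SeqTrainer(model=model, args=training_args,
#       eval_examples=examples, post_process_function=post_processing_function)
#   metrics = trainer.evaluate(max_length=64, num_beams=4)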
| 351 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation using DDIM sampling."""
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
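# Minimal usage sketch (the checkpoint id below is illustrative):
#   pipe = DDIMPipeline.from_pretrained('google/ddpm-cifar10-32')
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]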
| 277 | 0 |
"""simple docstring"""
import os
import sys
import unittest
__lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowercase = os.path.join(git_repo_path, """src""", """diffusers""")
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Any):
a : List[Any] = find_backend(" if not is_torch_available():")
self.assertEqual(__UpperCAmelCase , "torch")
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):")
self.assertEqual(__UpperCAmelCase , "torch_and_transformers")
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
a : int = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx")
def __snake_case ( self : Union[str, Any]):
a : Dict = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , __UpperCAmelCase)
self.assertIn("torch_and_transformers" , __UpperCAmelCase)
self.assertIn("flax_and_transformers" , __UpperCAmelCase)
self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase)
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"])
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"])
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"])
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"])
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"])
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"])
def __snake_case ( self : Tuple):
a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'")
self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n")
a : Dict = create_dummy_object("function" , "'torch'")
self.assertEqual(
__UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n")
a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
a : int = create_dummy_object("FakeClass" , "'torch'")
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : List[str]):
a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
| 40 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
a : Optional[int] = logging.get_logger(__name__)
# General docstring
a : Union[str, Any] = '''MobileNetV1Config'''
# Base docstring
a : str = '''google/mobilenet_v1_1.0_224'''
a : str = [1, 1024, 7, 7]
# Image classification docstring
a : Optional[Any] = '''google/mobilenet_v1_1.0_224'''
a : Optional[int] = '''tabby, tabby cat'''
a : List[str] = [
'''google/mobilenet_v1_1.0_224''',
'''google/mobilenet_v1_0.75_192''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : str , _lowercase : int=None ) ->int:
'''simple docstring'''
a : List[Any] = {}
if isinstance(_lowercase , _lowercase ):
a : Union[str, Any] = model.mobilenet_va
else:
a : List[str] = model
a : Dict = "MobilenetV1/Conv2d_0/"
a : Tuple = backbone.conv_stem.convolution.weight
a : Dict = backbone.conv_stem.normalization.bias
a : Optional[Any] = backbone.conv_stem.normalization.weight
a : Optional[Any] = backbone.conv_stem.normalization.running_mean
a : Tuple = backbone.conv_stem.normalization.running_var
for i in range(13 ):
a : List[str] = i + 1
a : Dict = i * 2
a : int = backbone.layer[pt_index]
a : List[str] = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
a : int = pointer.convolution.weight
a : Union[str, Any] = pointer.normalization.bias
a : Union[str, Any] = pointer.normalization.weight
a : Optional[Any] = pointer.normalization.running_mean
a : Dict = pointer.normalization.running_var
a : List[Any] = backbone.layer[pt_index + 1]
a : Union[str, Any] = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
a : Dict = pointer.convolution.weight
a : Optional[Any] = pointer.normalization.bias
a : Dict = pointer.normalization.weight
a : Optional[Any] = pointer.normalization.running_mean
a : Optional[Any] = pointer.normalization.running_var
if isinstance(_lowercase , _lowercase ):
a : Dict = "MobilenetV1/Logits/Conv2d_1c_1x1/"
a : Tuple = model.classifier.weight
a : Optional[int] = model.classifier.bias
return tf_to_pt_map
def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : List[Any] , _lowercase : Tuple ) ->int:
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
a : List[Any] = tf.train.list_variables(_lowercase )
a : Optional[int] = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
a : Union[str, Any] = tf.train.load_variable(_lowercase , _lowercase )
a : Optional[Any] = array
# Build TF to PyTorch weights loading map
a : Tuple = _build_tf_to_pytorch_map(_lowercase , _lowercase , _lowercase )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
a : List[str] = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
a : List[Any] = np.transpose(_lowercase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
a : Union[str, Any] = array.squeeze().transpose()
else:
a : Any = np.transpose(_lowercase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
a : str = torch.from_numpy(_lowercase )
tf_weights.pop(_lowercase , _lowercase )
tf_weights.pop(name + "/RMSProp" , _lowercase )
tf_weights.pop(name + "/RMSProp_1" , _lowercase )
tf_weights.pop(name + "/ExponentialMovingAverage" , _lowercase )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def apply_tf_padding( features : torch.Tensor , conv_layer : nn.Convad ) ->torch.Tensor:
    '''simple docstring'''
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , "constant" , 0.0 )
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 1 , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = True , ) -> None:
super().__init__()
a : str = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
a : Optional[int] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
a : Tuple = nn.Convad(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , padding_mode="zeros" , )
if use_normalization:
a : Optional[int] = nn.BatchNormad(
num_features=lowerCAmelCase__ , eps=config.layer_norm_eps , momentum=0.9_997 , affine=lowerCAmelCase__ , track_running_stats=lowerCAmelCase__ , )
else:
a : int = None
if use_activation:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Optional[int] = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCAmelCase__ ):
a : Dict = ACTaFN[config.hidden_act]
else:
a : Union[str, Any] = config.hidden_act
else:
a : int = None
def __a ( self , lowerCAmelCase__ ) -> torch.Tensor:
if self.config.tf_padding:
a : Union[str, Any] = apply_tf_padding(lowerCAmelCase__ , self.convolution )
a : List[str] = self.convolution(lowerCAmelCase__ )
if self.normalization is not None:
a : int = self.normalization(lowerCAmelCase__ )
if self.activation is not None:
a : Dict = self.activation(lowerCAmelCase__ )
return features
class __UpperCamelCase ( a__ ):
lowerCamelCase : List[str] =MobileNetVaConfig
lowerCamelCase : str =load_tf_weights_in_mobilenet_va
lowerCamelCase : List[str] ="""mobilenet_v1"""
lowerCamelCase : Tuple ="""pixel_values"""
lowerCamelCase : Optional[Any] =False
def __a ( self , lowerCAmelCase__ ) -> None:
if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCAmelCase__ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
a : Optional[Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
a : List[str] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True ) -> List[str]:
super().__init__(lowerCAmelCase__ )
a : Tuple = config
a : Dict = 32
a : Optional[int] = max(int(depth * config.depth_multiplier ) , config.min_depth )
a : Dict = MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=config.num_channels , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=2 , )
a : Dict = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
a : int = nn.ModuleList()
for i in range(13 ):
a : Optional[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
a : List[str] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=strides[i] , groups=lowerCAmelCase__ , ) )
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=1 , ) )
a : Tuple = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def __a ( self , lowerCAmelCase__ ) -> Optional[Any]:
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __a ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
a : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
a : List[str] = self.conv_stem(lowerCAmelCase__ )
a : Dict = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
a : List[Any] = layer_module(lowerCAmelCase__ )
if output_hidden_states:
a : Optional[Any] = all_hidden_states + (hidden_states,)
a : Any = hidden_states
if self.pooler is not None:
a : Union[str, Any] = torch.flatten(self.pooler(lowerCAmelCase__ ) , start_dim=1 )
else:
a : List[Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
@add_start_docstrings(
"""
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , a__ , )
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ ) -> None:
super().__init__(lowerCAmelCase__ )
a : int = config.num_labels
a : List[Any] = MobileNetVaModel(lowerCAmelCase__ )
a : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
a : Union[str, Any] = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCAmelCase__ )
a : str = nn.Linear(lowerCAmelCase__ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __a ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
a : Any = self.mobilenet_va(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
a : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
a : Tuple = self.classifier(self.dropout(lowerCAmelCase__ ) )
a : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a : List[Any] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a : Any = "single_label_classification"
else:
a : int = "multi_label_classification"
if self.config.problem_type == "regression":
a : Tuple = MSELoss()
if self.num_labels == 1:
a : Dict = loss_fct(logits.squeeze() , labels.squeeze() )
else:
a : str = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
a : List[Any] = CrossEntropyLoss()
a : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a : int = BCEWithLogitsLoss()
a : Optional[int] = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
a : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states , )
| 105 | 0 |
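The apply_tf_padding helper above implements TensorFlow's "SAME" padding rule. Here is the arithmetic in isolation, as a plain-Python sketch that runs without torch installed:

# TF-style "SAME" padding for one spatial axis: pad so that the output
# size equals ceil(in_size / stride), with the extra pixel on the right.
def same_padding(in_size: int, stride: int, kernel: int) -> tuple[int, int]:
    if in_size % stride == 0:
        pad_along = max(kernel - stride, 0)
    else:
        pad_along = max(kernel - (in_size % stride), 0)
    return pad_along // 2, pad_along - pad_along // 2

assert same_padding(7, 2, 3) == (1, 1)  # 7 inputs -> ceil(7/2) = 4 outputs
assert same_padding(8, 2, 3) == (0, 1)  # even size: the pad lands on the right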
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowerCAmelCase : Tuple = FlaxAutoencoderKL
@property
def __lowerCamelCase ( self :List[str] ):
snake_case__ : int = 4
snake_case__ : Tuple = 3
snake_case__ : Union[str, Any] = (3_2, 3_2)
snake_case__ : List[Any] = jax.random.PRNGKey(0 )
snake_case__ : Union[str, Any] = jax.random.uniform(__lowercase ,((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def __lowerCamelCase ( self :Any ):
snake_case__ : Any = {
"""block_out_channels""": [3_2, 6_4],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
snake_case__ : Dict = self.dummy_input
return init_dict, inputs_dict
| 350 |
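The dummy_input property above follows the usual Flax testing pattern: a fixed PRNG key plus a uniformly sampled image batch. A stand-alone sketch (requires jax; the shapes are illustrative only):

import jax

key = jax.random.PRNGKey(0)                       # deterministic seed
sample = jax.random.uniform(key, (4, 3, 32, 32))  # (batch, channels, H, W)
print(sample.shape)  # (4, 3, 32, 32)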
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
A__ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ['''DPTFeatureExtractor''']
A__ = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
A__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 44 | 0 |
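The _LazyModule wiring above defers heavy imports until an attribute is first accessed. A self-contained sketch of the same idea using PEP 562 module-level __getattr__ instead of transformers' helper; the math mapping is a toy stand-in for the real import structure:

import importlib

_import_structure = {"math": ["sqrt"]}  # toy stand-in for the real mapping

def __getattr__(name):
    # Resolve the symbol lazily, on first attribute access of this module.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")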
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Load configuration defined in the metadata file
with open(_SCREAMING_SNAKE_CASE ) as metadata_file:
_snake_case = json.load(_SCREAMING_SNAKE_CASE )
_snake_case = LukeConfig(use_entity_aware_attention=_SCREAMING_SNAKE_CASE , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
_snake_case = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
_snake_case = load_original_entity_vocab(_SCREAMING_SNAKE_CASE )
# add an entry for [MASK2]
_snake_case = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_snake_case = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
_snake_case = AddedToken("""<ent>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
_snake_case = AddedToken("""<ent2>""" , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , """tokenizer_config.json""" ) , """r""" ) as f:
_snake_case = json.load(_SCREAMING_SNAKE_CASE )
_snake_case = """MLukeTokenizer"""
with open(os.path.join(_SCREAMING_SNAKE_CASE , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_snake_case = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# Initialize the embeddings of the special tokens
_snake_case = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
_snake_case = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
_snake_case = state_dict["""embeddings.word_embeddings.weight"""]
_snake_case = word_emb[ent_init_index].unsqueeze(0 )
_snake_case = word_emb[enta_init_index].unsqueeze(0 )
_snake_case = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_snake_case = state_dict[bias_name]
_snake_case = decoder_bias[ent_init_index].unsqueeze(0 )
_snake_case = decoder_bias[enta_init_index].unsqueeze(0 )
_snake_case = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_snake_case = f"""encoder.layer.{layer_index}.attention.self."""
_snake_case = state_dict[prefix + matrix_name]
_snake_case = state_dict[prefix + matrix_name]
_snake_case = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_snake_case = state_dict["""entity_embeddings.entity_embeddings.weight"""]
_snake_case = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_snake_case = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_snake_case = state_dict["""entity_predictions.bias"""]
_snake_case = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
_snake_case = torch.cat([entity_prediction_bias, entity_mask_bias] )
_snake_case = LukeForMaskedLM(config=_SCREAMING_SNAKE_CASE ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
_snake_case = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
_snake_case = state_dict[key]
else:
_snake_case = state_dict[key]
_snake_case, _snake_case = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
if set(_SCREAMING_SNAKE_CASE ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(_SCREAMING_SNAKE_CASE ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_snake_case = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , task="""entity_classification""" )
_snake_case = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
_snake_case = (0, 9)
_snake_case = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="""pt""" )
_snake_case = model(**_SCREAMING_SNAKE_CASE )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_snake_case = torch.Size((1, 33, 768) )
_snake_case = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_snake_case = torch.Size((1, 1, 768) )
_snake_case = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_snake_case = MLukeTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
_snake_case = """Tokyo is the capital of <mask>."""
_snake_case = (24, 30)
_snake_case = tokenizer(_SCREAMING_SNAKE_CASE , entity_spans=[span] , return_tensors="""pt""" )
_snake_case = model(**_SCREAMING_SNAKE_CASE )
_snake_case = encoding["""input_ids"""][0].tolist()
_snake_case = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
_snake_case = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_SCREAMING_SNAKE_CASE )
_snake_case = outputs.entity_logits[0][0].argmax().item()
_snake_case = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(_SCREAMING_SNAKE_CASE ) )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = ["""[MASK]""", """[PAD]""", """[UNK]"""]
_snake_case = [json.loads(_SCREAMING_SNAKE_CASE ) for line in open(_SCREAMING_SNAKE_CASE )]
_snake_case = {}
for entry in data:
_snake_case = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_snake_case = entity_id
break
_snake_case = f"""{language}:{entity_name}"""
_snake_case = entity_id
return new_mapping
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__lowerCAmelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
) | 341 |
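A recurring trick in the conversion script above: rows for newly added special tokens are initialized from existing embedding rows and concatenated onto the matrix. The same step in isolation (shapes and row indices are illustrative):

import torch

word_emb = torch.randn(10, 8)        # (vocab_size, hidden_size)
ent_row = word_emb[3].unsqueeze(0)   # seed row for "<ent>"
enta_row = word_emb[4].unsqueeze(0)  # seed row for "<ent2>"
word_emb = torch.cat([word_emb, ent_row, enta_row])
assert word_emb.shape == (12, 8)     # vocab grew by the two added tokens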
'''simple docstring'''
ROMAN = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int( roman ):
    vals = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman( number ):
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod() | 341 | 1 |
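int_to_roman is greedy: divmod peels off as many copies of the largest remaining symbol as fit before moving down the table. A round-trip spot check for the two helpers above:

assert int_to_roman(1994) == "MCMXCIV"
assert roman_to_int("MCMXCIV") == 1994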
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
UpperCamelCase = mock.Mock()
UpperCamelCase = 500
UpperCamelCase = {}
UpperCamelCase = HTTPError
UpperCamelCase = {}
# Download this model to make sure it's in the cache.
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
UpperCamelCase = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase = tempfile.mktemp()
with open(A_ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , A_ )
UpperCamelCase = AlbertTokenizer.from_pretrained(A_ )
finally:
os.remove(A_ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , A_ )
UpperCamelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class lowercase ( unittest.TestCase ):
__lowercase : Tuple = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def __UpperCamelCase ( cls ) -> int:
"""simple docstring"""
UpperCamelCase = TOKEN
HfFolder.save_token(A_ )
@classmethod
def __UpperCamelCase ( cls ) -> int:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(A_ , repo_id='test-tokenizer' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizer(A_ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
A_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=A_ , use_auth_token=self._token )
UpperCamelCase = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = CustomTokenizer(A_ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase = os.path.join(A_ , 'vocab.txt' )
with open(A_ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase = BertTokenizerFast.from_pretrained(A_ )
bert_tokenizer.save_pretrained(A_ )
UpperCamelCase = CustomTokenizerFast.from_pretrained(A_ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''' , use_fast=A_ , trust_remote_code=A_ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase = Trie()
UpperCamelCase = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(A_ , ['AB', 'C'] )
| 110 |
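The "server down" tests above all share one pattern: build a Mock whose status code is 500, patch requests.Session.request so every HTTP call returns it, then assert that the cached artifact still loads. The skeleton in isolation:

import unittest.mock as mock

import requests

response = mock.Mock()
response.status_code = 500
response.headers = {}
response.raise_for_status.side_effect = requests.exceptions.HTTPError

with mock.patch("requests.Session.request", return_value=response):
    pass  # the from_pretrained(...) call under test would run here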
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 110 | 1 |
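The try/except wiring above is the standard optional-dependency guard: when availability (or a version check) fails, the public names are rebound to dummy objects that only raise once actually used. Generic shape of the pattern, with a hypothetical fancylib dependency standing in for the real backends:

class _MissingDependency:
    """Dummy that defers the ImportError until the symbol is actually used."""

    def __init__(self, *args, **kwargs):
        raise ImportError("fancylib is required to use this object")

try:
    import fancylib  # hypothetical optional backend
except ImportError:
    FancyPipeline = _MissingDependency  # importing the package still works
else:
    from fancylib import FancyPipeline  # real implementation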
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset( ) -> tuple[list[int], int]:
    arr = [randint(-1000 , 1000 ) for i in range(10 )]
    target = randint(-5000 , 5000 )
    return (arr, target)
dataset = make_dataset()
def triplet_sum1( arr : list[int] , target : int ) -> tuple[int, int, int]:
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2( arr : list[int] , target : int ) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times( ) -> tuple[float, float]:
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    test_code1 = '''
triplet_sum1(*dataset)
'''
    test_code2 = '''
triplet_sum2(*dataset)
'''
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10000 )
    return (min(times1 ), min(times2 ))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
| 9 |
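Deterministic spot check for the two implementations above; the random benchmark aside, they must agree on a fixed input:

assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)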
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowerCAmelCase : int = None
lowerCAmelCase : Tuple = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
lowerCAmelCase : Optional[int] = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
lowerCAmelCase : Union[str, Any] = """▁"""
# Segments (not really needed)
lowerCAmelCase : str = 0
lowerCAmelCase : Optional[int] = 1
lowerCAmelCase : Tuple = 2
lowerCAmelCase : Optional[Any] = 3
lowerCAmelCase : List[Any] = 4
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = "left"
__UpperCamelCase = XLNetTokenizer
def __init__( self , _a=None , _a=None , _a=False , _a=True , _a=False , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<sep>" , _a="<pad>" , _a="<cls>" , _a="<mask>" , _a=["<eop>", "<eod>"] , **_a , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , )
lowerCamelCase = 3
lowerCamelCase = do_lower_case
lowerCamelCase = remove_space
lowerCamelCase = keep_accents
lowerCamelCase = vocab_file
lowerCamelCase = False if not self.vocab_file else True
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 291 | 0 |
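The two methods above encode XLNet's layout: SEP closes each segment, CLS comes last, and the CLS position carries segment id 2. A worked example with toy token ids:

sep, cls = [5], [3]            # toy ids for <sep> and <cls>
seq_a, seq_b = [10, 11], [20]
input_ids = seq_a + sep + seq_b + sep + cls
token_type_ids = len(seq_a + sep) * [0] + len(seq_b + sep) * [1] + [2]
assert input_ids == [10, 11, 5, 20, 5, 3]
assert token_type_ids == [0, 0, 0, 1, 1, 2]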
"""simple docstring"""
import sys
import turtle
def get_mid( pa , pb ) -> tuple[float, float]:
    """simple docstring"""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle( vertexa , vertexb , vertexc , depth , ) -> None:
    """simple docstring"""
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexb , vertexa ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexa ) , get_mid(vertexc , vertexb ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
lowercase__ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
lowercase__ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 161 |
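The recursion above depends only on midpoints. Here is the first subdivision of the demo triangle, computed with get_mid from above but without opening a turtle window:

v = [(-175, -125), (0, 175), (175, -125)]
mids = [get_mid(v[0], v[1]), get_mid(v[1], v[2]), get_mid(v[0], v[2])]
print(mids)  # [(-87.5, 25.0), (87.5, 25.0), (0.0, -125.0)]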
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
lowerCAmelCase_ : Tuple = np.array([[1, item, train_mtch[i]] for i, item in enumerate(__UpperCamelCase )] )
lowerCAmelCase_ : str = np.array(__UpperCamelCase )
lowerCAmelCase_ : Optional[Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , __UpperCamelCase ) ) , x.transpose() ) , __UpperCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
lowerCAmelCase_ : Union[str, Any] = (1, 2, 1)
lowerCAmelCase_ : str = (1, 1, 0, 7)
lowerCAmelCase_ : List[Any] = SARIMAX(
__UpperCamelCase , exog=__UpperCamelCase , order=__UpperCamelCase , seasonal_order=__UpperCamelCase )
lowerCAmelCase_ : Optional[int] = model.fit(disp=__UpperCamelCase , maxiter=600 , method="nm" )
lowerCAmelCase_ : Optional[Any] = model_fit.predict(1 , len(__UpperCamelCase ) , exog=[test_match] )
return result[0]
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> float:
"""simple docstring"""
lowerCAmelCase_ : int = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(__UpperCamelCase , __UpperCamelCase )
lowerCAmelCase_ : Dict = regressor.predict(__UpperCamelCase )
return y_pred[0]
def __lowerCamelCase ( __UpperCamelCase ) -> float:
"""simple docstring"""
train_user.sort()
lowerCAmelCase_ : Optional[Any] = np.percentile(__UpperCamelCase , 25 )
lowerCAmelCase_ : List[Any] = np.percentile(__UpperCamelCase , 75 )
lowerCAmelCase_ : Union[str, Any] = qa - qa
lowerCAmelCase_ : List[Any] = qa - (iqr * 0.1)
return low_lim
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Union[str, Any] = 0
for i in list_vote:
if i > actual_result:
lowerCAmelCase_ : Tuple = not_safe + 1
else:
if abs(abs(__UpperCamelCase ) - abs(__UpperCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
lowercase__ = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
lowercase__ = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
lowercase__ = Normalizer().fit_transform(data_input_df.values)
# split data
lowercase__ = normalize_df[:, 2].tolist()
lowercase__ = normalize_df[:, 0].tolist()
lowercase__ = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
lowercase__ = normalize_df[:, [1, 2]].tolist()
lowercase__ = x[: len(x) - 1]
lowercase__ = x[len(x) - 1 :]
# for linear regression & sarimax
lowercase__ = total_date[: len(total_date) - 1]
lowercase__ = total_user[: len(total_user) - 1]
lowercase__ = total_match[: len(total_match) - 1]
lowercase__ = total_date[len(total_date) - 1 :]
lowercase__ = total_user[len(total_user) - 1 :]
lowercase__ = total_match[len(total_match) - 1 :]
# voting system with forecasting
lowercase__ = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
lowercase__ = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
| 161 | 1 |
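A self-contained restatement of the data_safety_checker vote above, with readable names: a forecast counts as safe only when it does not exceed the actual value and lands within 0.1 of it in absolute terms, and the majority of votes decides:

def is_safe(votes: list[float], actual: float) -> bool:
    safe = sum(1 for v in votes if v <= actual and abs(abs(v) - abs(actual)) <= 0.1)
    return safe > len(votes) - safe

assert is_safe([0.5, 0.55, 1.2], 0.6)     # two of three votes are safe
assert not is_safe([0.7, 0.8, 0.5], 0.6)  # two of three overshoot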
from __future__ import annotations
lowercase__ : Dict = "#"
class Trie:
    """simple docstring"""

    def __init__( self )-> None:
        '''simple docstring'''
        self._trie = {}

    def insert_word( self , text )-> None:
        '''simple docstring'''
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word( self , prefix )-> tuple | list:
        '''simple docstring'''
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )

    def _elements( self , d )-> tuple:
        '''simple docstring'''
        result = []
        for c, v in d.items():
            sub_result = [''' '''] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie( string : str ) -> tuple:
    '''simple docstring'''
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )
def main() -> None:
    '''simple docstring'''
    print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 328 |
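Quick demo of the autocomplete above: the prefix "de" matches four of the inserted words, each suffix completed and terminated with the space marker emitted at end-of-word nodes:

assert sorted(autocomplete_using_trie("de")) == [
    "deal ", "deer ", "depart ", "detergent "
]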
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ : str = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
lowercase__ : str = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = 'whisper'
_snake_case = ['past_key_values']
_snake_case = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , SCREAMING_SNAKE_CASE_=51865 , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=50257 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1500 , SCREAMING_SNAKE_CASE_=448 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=[220, 50256] , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.0_5 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=7 , **SCREAMING_SNAKE_CASE_ , )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = num_mel_bins
__UpperCamelCase = d_model
__UpperCamelCase = encoder_layers
__UpperCamelCase = encoder_attention_heads
__UpperCamelCase = decoder_layers
__UpperCamelCase = decoder_attention_heads
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = activation_function
__UpperCamelCase = init_std
__UpperCamelCase = encoder_layerdrop
__UpperCamelCase = decoder_layerdrop
__UpperCamelCase = use_cache
__UpperCamelCase = encoder_layers
__UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCamelCase = max_source_positions
__UpperCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase = classifier_proj_size
__UpperCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase = apply_spec_augment
__UpperCamelCase = mask_time_prob
__UpperCamelCase = mask_time_length
__UpperCamelCase = mask_time_min_masks
__UpperCamelCase = mask_feature_prob
__UpperCamelCase = mask_feature_length
__UpperCamelCase = mask_feature_min_masks
__UpperCamelCase = median_filter_width
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@property
def A__ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__UpperCamelCase = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
__UpperCamelCase = {0: '''batch'''}
else:
__UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' )
return common_inputs
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 22050 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 220 , )-> Mapping[str, Any]:
'''simple docstring'''
__UpperCamelCase = OrderedDict()
__UpperCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , )
__UpperCamelCase = encoder_inputs['''input_features'''].shape[2]
__UpperCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__UpperCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = encoder_inputs.pop('''input_features''' )
__UpperCamelCase = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
__UpperCamelCase = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def A__ ( self )-> float:
'''simple docstring'''
return 1E-3
| 328 | 1 |
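The inputs property above exists to name dynamic axes for ONNX export; any dimension given a name stays symbolic in the exported graph instead of being frozen to the dummy-input size. The mapping it builds, in isolation:

from collections import OrderedDict

common_inputs = OrderedDict(
    [("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"})]
)
# Without past key/values the decoder sequence length is dynamic too:
common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
print(common_inputs)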
"""Tests for the TimmBackbone model."""
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig


class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}

    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
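# A minimal sketch of the backbone under test, outside the unittest harness.
# Hedged: assumes timm is installed and the "resnet18" weights can be fetched;
# this snippet is illustrative and not part of the original test file.
#
#     import torch
#     from transformers import AutoBackbone
#
#     backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
#     outputs = backbone(torch.rand(1, 3, 224, 224))
#     for fmap in outputs.feature_maps:
#         print(tuple(fmap.shape))  # one feature map per requested stage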
| 364 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clap": [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapAudioConfig",
        "ClapConfig",
        "ClapTextConfig",
    ],
    "processing_clap": ["ClapProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clap"] = [
        "CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ClapModel",
        "ClapPreTrainedModel",
        "ClapTextModel",
        "ClapTextModelWithProjection",
        "ClapAudioModel",
        "ClapAudioModelWithProjection",
    ]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
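# How the lazy pattern above behaves at runtime (a sketch, not part of the file):
# importing the package is cheap because torch-heavy submodules such as
# modeling_clap are only loaded on first attribute access, e.g.:
#
#     from transformers import ClapModel  # triggers the lazy import of modeling_clap
#     model = ClapModel.from_pretrained("laion/clap-htsat-unfused")  # checkpoint name is an assumption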
| 98 | 0 |
"""simple docstring"""
from __future__ import annotations
import bisect
def lowercase (snake_case__ : list[int] , snake_case__ : int , snake_case__ : int = 0 , snake_case__ : int = -1 ) -> int:
'''simple docstring'''
if hi < 0:
lowerCAmelCase = len(snake_case__ )
while lo < hi:
lowerCAmelCase = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lowerCAmelCase = mid + 1
else:
lowerCAmelCase = mid
return lo
def lowercase (snake_case__ : list[int] , snake_case__ : int , snake_case__ : int = 0 , snake_case__ : int = -1 ) -> int:
'''simple docstring'''
if hi < 0:
lowerCAmelCase = len(snake_case__ )
while lo < hi:
lowerCAmelCase = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lowerCAmelCase = mid + 1
else:
lowerCAmelCase = mid
return lo
def lowercase (snake_case__ : list[int] , snake_case__ : int , snake_case__ : int = 0 , snake_case__ : int = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) , snake_case__ )
def lowercase (snake_case__ : list[int] , snake_case__ : int , snake_case__ : int = 0 , snake_case__ : int = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) , snake_case__ )
def lowercase (snake_case__ : list[int] , snake_case__ : int ) -> int | None:
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = len(snake_case__ ) - 1
while left <= right:
lowerCAmelCase = left + (right - left) // 2
lowerCAmelCase = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
lowerCAmelCase = midpoint - 1
else:
lowerCAmelCase = midpoint + 1
return None
def lowercase (snake_case__ : list[int] , snake_case__ : int ) -> int | None:
'''simple docstring'''
lowerCAmelCase = bisect.bisect_left(snake_case__ , snake_case__ )
if index != len(snake_case__ ) and sorted_collection[index] == item:
return index
return None
def lowercase (snake_case__ : list[int] , snake_case__ : int , snake_case__ : int , snake_case__ : int ) -> int | None:
'''simple docstring'''
if right < left:
return None
lowerCAmelCase = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(snake_case__ , snake_case__ , snake_case__ , midpoint - 1 )
else:
return binary_search_by_recursion(snake_case__ , snake_case__ , midpoint + 1 , snake_case__ )
if __name__ == "__main__":
a = input('Enter numbers separated by comma:\n').strip()
a = sorted(int(item) for item in user_input.split(','))
a = int(input('Enter a single number to be found in the list:\n'))
a = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 155 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a = logging.get_logger(__name__)
a = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( _a ):
_a = 'deberta-v2'
def __init__( self : Optional[Any] , lowerCAmelCase : Optional[Any]=12_8100 , lowerCAmelCase : Any=1536 , lowerCAmelCase : str=24 , lowerCAmelCase : str=24 , lowerCAmelCase : Optional[Any]=6144 , lowerCAmelCase : List[str]="gelu" , lowerCAmelCase : str=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[int]=512 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Optional[int]=0.02 , lowerCAmelCase : Optional[int]=1e-7 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Dict=-1 , lowerCAmelCase : int=0 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : int=None , lowerCAmelCase : Any=0 , lowerCAmelCase : Dict="gelu" , **lowerCAmelCase : Any , ):
super().__init__(**lowerCAmelCase )
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = relative_attention
lowerCAmelCase = max_relative_positions
lowerCAmelCase = pad_token_id
lowerCAmelCase = position_biased_input
# Backwards compatibility
if type(lowerCAmelCase ) == str:
lowerCAmelCase = [x.strip() for x in pos_att_type.lower().split("""|""" )]
lowerCAmelCase = pos_att_type
lowerCAmelCase = vocab_size
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = kwargs.get("""pooler_hidden_size""" , lowerCAmelCase )
lowerCAmelCase = pooler_dropout
lowerCAmelCase = pooler_hidden_act
class SCREAMING_SNAKE_CASE__ ( _a ):
@property
def __lowercase ( self : Optional[int] ):
if self.task == "multiple-choice":
lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def __lowercase ( self : List[str] ):
return 12
def __lowercase ( self : int , lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional["TensorType"] = None , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 40 , lowerCAmelCase : int = 40 , lowerCAmelCase : "PreTrainedTokenizerBase" = None , ):
lowerCAmelCase = super().generate_dummy_inputs(preprocessor=lowerCAmelCase , framework=lowerCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
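# A minimal usage sketch for the restored config (hedged: the small sizes are
# illustrative, and DebertaV2Model is assumed importable with torch installed):
#
#     from transformers import DebertaV2Config, DebertaV2Model
#
#     config = DebertaV2Config(hidden_size=384, num_hidden_layers=4, num_attention_heads=6, intermediate_size=1536)
#     model = DebertaV2Model(config)  # randomly initialized, no download required
#     print(config.model_type)  # "deberta-v2"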
| 155 | 1 |
"""simple docstring"""
from __future__ import annotations
def __A ( a_ :Optional[Any] , a_ :List[Any]) -> list[int]:
__a : Union[str, Any] = 0
__a : Optional[int] = len(__snake_case) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__a : List[Any] = i + 1
else:
__a : Any = j - 1
return []
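# Trace of the sweep for the call in the demo below (illustrative, not from the original):
#   two_pointer([2, 7, 11, 15], 9): i=0, j=3 -> 2 + 15 = 17 > 9 -> j=2
#                                   i=0, j=2 -> 2 + 11 = 13 > 9 -> j=1
#                                   i=0, j=1 -> 2 + 7  =  9     -> returns [0, 1]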
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 362 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ['''image_processor''', '''tokenizer''']
__lowerCAmelCase = '''CLIPImageProcessor'''
__lowerCAmelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''')
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
__a : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _UpperCAmelCase , )
__a : Any = kwargs.pop('''feature_extractor''' )
__a : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__a : Any = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
__a : List[str] = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__a : Optional[int] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def _lowerCamelCase ( self ):
__a : Union[str, Any] = self.tokenizer.model_input_names
__a : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 188 | 0 |
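# A minimal usage sketch for the processor record above (hedged: the class name
# matches AltCLIP's processor in transformers, and the "BAAI/AltCLIP" checkpoint
# and blank PIL image are assumptions for illustration; requires network access):
#
#     from PIL import Image
#     from transformers import AltCLIPProcessor
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
#     print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values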