| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__) # pylint: disable=invalid-name
__SCREAMING_SNAKE_CASE ="\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w(h, w, scale_factor=8):
    """Map a requested pixel size to a latent grid size: ceil(size / scale_factor**2) * scale_factor."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
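
# Illustrative check of the rounding above (not part of the original file): with the
# default scale_factor=8 the helper returns ceil(size / 64) * 8, e.g.
#
#   get_new_h_w(768, 768)  # -> (96, 96): 768 is divisible by 64
#   get_new_h_w(500, 500)  # -> (64, 64): 500 rounds up to 8 latent blocks
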
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # Scale the initial noise by the standard deviation the scheduler expects.
        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates

            # For classifier-free guidance we need two forward passes. Here we concatenate
            # the unconditional and text embeddings into a single batch to do both at once.
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU; each submodule is moved to the GPU only while its `forward` runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU with accelerate hooks; faster than sequential offload, smaller memory saving."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Return the device the pipeline runs on, accounting for accelerate's offload hooks."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
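
# A minimal seeded-generation sketch (illustrative; it mirrors the docstring example
# above and reuses its `pipe`, `prompt`, `image_emb`, `negative_image_emb` names):
# passing a torch.Generator makes prepare_latents deterministic, so repeated calls
# reproduce the same image.
#
#   generator = torch.Generator(device="cuda").manual_seed(0)
#   image = pipe(
#       prompt,
#       image_embeds=image_emb,
#       negative_image_embeds=negative_image_emb,
#       generator=generator,
#   ).images[0]
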
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler

from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
    Adafactor,
    AdamW,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available


if is_fairscale_available():
    from fairscale.optim import OSS


logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Set up the optimizer (Adafactor or AdamW, with and without weight decay) and the LR scheduler."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Run one evaluation step; returns (loss, generated tokens or logits, labels), padded to max length."""
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
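
# Illustrative use of the `arg_to_scheduler` mapping above (not part of the original
# file): `--lr_scheduler linear` resolves to `get_linear_schedule_with_warmup`, which
# `_get_lr_scheduler` calls with the warmup and total step counts, e.g.
#
#   schedule_func = arg_to_scheduler["linear"]
#   scheduler = schedule_func(optimizer, num_warmup_steps=500, num_training_steps=10_000)
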
def solution(limit: int = 1_000_000) -> int:
    """Project Euler #14: return the starting number below `limit` with the longest
    Collatz chain, memoising chain lengths in `counters` as they are discovered."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
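
# Sanity check (the widely cited Project Euler #14 result, not taken from the original
# file): solution(1_000_000) should return 837799, whose chain has 525 terms.
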
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Rebuild the backend pre-tokenizer if its `add_prefix_space` setting disagrees
        # with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Concatenate conversation turns, each terminated by the EOS token (DialoGPT-style input)."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
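
# Illustrative usage of the add_prefix_space guard above (not part of the original
# file): pretokenized input is only accepted when the tokenizer was created with
# add_prefix_space=True.
#
#   from transformers import GPT2TokenizerFast
#   tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tok(["Hello", "world"], is_split_into_words=True)  # ok; would assert otherwise
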
from datetime import datetime

import matplotlib.pyplot as plt
import torch


# NOTE: the helper names below are reconstructions; the original identifiers were
# mangled, so descriptive names were chosen to match each function's behavior.
def freeze_params(module) -> None:
    """Disable gradient computation for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display `image` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
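
# Minimal usage sketch for the helpers above (illustrative only):
#
#   device = get_device()
#   model = torch.nn.Linear(4, 4).to(device)
#   freeze_params(model)      # all parameters now have requires_grad == False
#   print(get_timestamp())    # e.g. "14:03:59"
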
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate an EncoderDecoderConfig from an encoder config and a decoder config."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a Python dict, including the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
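
# Illustrative construction (not part of the original file), using the public
# `from_encoder_decoder_configs` helper defined above:
#
#   from transformers import BertConfig, GPT2Config
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
#   assert config.is_encoder_decoder and config.decoder.add_cross_attention
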
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
__A : List[Any] = get_logger(__name__)
__A : Tuple = Path(__file__).parent / 'model_card_template.md'
__A : Any = uuida().hex
__A : Optional[int] = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
__A : Tuple = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
__A : List[Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def snake_case__ ( _lowerCamelCase = None ) ->str:
"""simple docstring"""
__lowercase : Optional[Any] = F'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'; torch/{_torch_version}'
if is_flax_available():
ua += F'; jax/{_jax_version}'
ua += F'; flax/{_flax_version}'
if is_onnx_available():
ua += F'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI", "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(_lowerCamelCase, _lowerCamelCase ):
ua += "; " + "; ".join(F'{k}/{v}' for k, v in user_agent.items() )
elif isinstance(_lowerCamelCase, _lowerCamelCase ):
ua += "; " + user_agent
return ua
def snake_case__ ( _lowerCamelCase, _lowerCamelCase = None, _lowerCamelCase = None ) ->List[Any]:
"""simple docstring"""
if token is None:
__lowercase : Any = HfFolder.get_token()
if organization is None:
__lowercase : int = whoami(_lowerCamelCase )["name"]
return F'{username}/{model_id}'
else:
return F'{organization}/{model_id}'
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->Dict:
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(_lowerCamelCase, "local_rank" ) and args.local_rank not in [-1, 0]:
return
__lowercase : Tuple = args.hub_token if hasattr(_lowerCamelCase, "hub_token" ) else None
__lowercase : int = get_full_repo_name(_lowerCamelCase, token=_lowerCamelCase )
__lowercase : Optional[Any] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[], ), template_path=_lowerCamelCase, model_name=_lowerCamelCase, repo_name=_lowerCamelCase, dataset_name=args.dataset_name if hasattr(_lowerCamelCase, "dataset_name" ) else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(_lowerCamelCase, "gradient_accumulation_steps" ) else None
), adam_betaa=args.adam_betaa if hasattr(_lowerCamelCase, "adam_beta1" ) else None, adam_betaa=args.adam_betaa if hasattr(_lowerCamelCase, "adam_beta2" ) else None, adam_weight_decay=args.adam_weight_decay if hasattr(_lowerCamelCase, "adam_weight_decay" ) else None, adam_epsilon=args.adam_epsilon if hasattr(_lowerCamelCase, "adam_epsilon" ) else None, lr_scheduler=args.lr_scheduler if hasattr(_lowerCamelCase, "lr_scheduler" ) else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(_lowerCamelCase, "lr_warmup_steps" ) else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(_lowerCamelCase, "ema_inv_gamma" ) else None, ema_power=args.ema_power if hasattr(_lowerCamelCase, "ema_power" ) else None, ema_max_decay=args.ema_max_decay if hasattr(_lowerCamelCase, "ema_max_decay" ) else None, mixed_precision=args.mixed_precision, )
__lowercase : Dict = os.path.join(args.output_dir, "README.md" )
model_card.save(_lowerCamelCase )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase = None ) ->Tuple:
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
__lowercase : Union[str, Any] = str(Path(_lowerCamelCase ).as_posix() )
__lowercase : int = re.search(R"snapshots/([^/]+)/", _lowerCamelCase )
if search is None:
return None
__lowercase : Union[str, Any] = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(_lowerCamelCase ) else None
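
# Illustrative behavior of extract_commit_hash (not part of the original file): the
# hash is read from the "snapshots/<hash>/" path segment and validated against
# REGEX_COMMIT_HASH (a 40-character hex string):
#
#   extract_commit_hash("models--x/snapshots/" + "a" * 40 + "/unet/config.json")
#   # -> "aaa...a" (40 chars); returns None when the segment is not a valid hash
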
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """Move blob files from the pre-v0.14.0 cache layout into the current one, leaving symlinks behind."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of"
                    " diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\n"
                "Please file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this"
                " whole message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )


def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert `variant` before the final file extension of `weights_name`."""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
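
# Illustrative behavior of _add_variant (not part of the original file): the variant
# is spliced in just before the extension.
#
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   # -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin")  # no variant -> unchanged
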
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """Resolve a weights file from a local file, a local directory, or the Hub."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name}."
                " \nCheck your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__A : int = '\nHuman: <<task>>\n\nAssistant: '
__A : List[str] = 'huggingface-tools/default-prompts'
__A : Tuple = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase="run" ) ->Optional[int]:
"""simple docstring"""
if prompt_or_repo_id is None:
__lowercase : Any = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s", _lowerCamelCase ) is not None:
return prompt_or_repo_id
__lowercase : Optional[Any] = cached_file(
_lowerCamelCase, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name} )
with open(_lowerCamelCase, "r", encoding="utf-8" ) as f:
return f.read()
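
# Illustrative usage (not part of the original file): with no prompt given, the "run"
# template is fetched from the default dataset repo; an explicit prompt string
# (anything containing whitespace) is returned unchanged.
#
#   prompt = download_prompt(None, agent_name="MyAgent", mode="run")
#   custom = download_prompt("Do the task: <<task>>", agent_name="MyAgent")  # returned as-is
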
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    """Check whether `cp` is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    """Collect multi-character Chinese words from a list of segmented tokens."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefix '##' onto BERT tokens that continue a whole word found by the LTP segmenter."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            window = min(end - start, max_word_len)
            for i in range(window, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """Build whole-word-masking reference positions for each line of text."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
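
# Example invocation (illustrative; assumes this script is saved as
# prepare_chinese_ref.py and the resource paths from the argparse defaults exist):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt
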
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
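# Editor's usage sketch (not in the original test file): outside the tests, the
# same warpers compose through FlaxLogitsProcessorList during sampling; the
# shapes and hyper-parameters below are illustrative only.
#
#     import jax.numpy as jnp
#     from transformers import (
#         FlaxLogitsProcessorList,
#         FlaxTemperatureLogitsWarper,
#         FlaxTopKLogitsWarper,
#     )
#
#     warpers = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=50)]
#     )
#     scores = warpers(input_ids=None, scores=jnp.zeros((1, 100)), cur_len=1)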
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CTRL model."""

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
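# Usage sketch (editor's addition, not part of the original file): the
# `attribute_map` above lets the generic config names resolve to the
# CTRL-specific ones.
#
#     config = CTRLConfig()
#     assert config.hidden_size == config.n_embd
#     assert config.num_hidden_layers == config.n_layer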
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
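# Worked example (editor's addition): for the degree-1 curve through (1, 2) and
# (3, 5), the basis at t = 0.5 is [0.5, 0.5], so the curve point is the midpoint
# of the two control points:
#
#     >>> BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5)
#     (2.0, 3.5)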
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
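# Editor's note: with the words inserted above, autocomplete_using_trie("de")
# should return the "de"-prefixed words in insertion order, each carrying the
# trailing space appended by _elements for the end-of-word marker:
#
#     ('depart ', 'detergent ', 'deer ', 'deal ')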
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """A few sanity checks on the passed arguments."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse the arguments, then launch the distillation training."""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument("--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument("--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.")
    parser.add_argument("--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).")
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument("--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")

    parser.add_argument("--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument("--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument("--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.")
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument("--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")

    parser.add_argument("--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument("--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.")
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument("--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).")
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument("--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution.")
    parser.add_argument("--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.")
    parser.add_argument("--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.")

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument("--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.")
    parser.add_argument("--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.")
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument("--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument("--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'Experiment will be dumped and logged in {args.dump_path}' )
# SAVE PARAMS #
logger.info(f'Param: {args}' )
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}' )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")
# STUDENT #
logger.info(f'Loading student config from {args.student_config}' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
logger.info(f'Teacher loaded from {args.teacher_name}.' )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
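# Example invocation (editor's sketch; it uses only flags defined by the parser
# above, and all file paths are placeholders):
#
#   python train.py --student_type distilbert --student_config student_config.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0 --token_counts token_counts.pkl \
#       --data_file data.pkl --dump_path serialization_dir/my_distillation --force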
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RoFormer model."""

    model_type = "roformer"

    def __init__(self, vocab_size=50000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
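# Usage sketch (editor's addition; hedged): inspecting the dynamic axes the
# ONNX export would use.
#
#     onnx_config = RoFormerOnnxConfig(RoFormerConfig())
#     print(onnx_config.inputs)  # OrderedDict of input_ids/attention_mask/token_type_ids axes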
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative int to its little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-char blocks and yield each as 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Perform bitwise NOT on a 32-bit int."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Add two 32-bit ints, wrapping around on overflow."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit int left by ``shift`` places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-char hex MD5 digest of ``message``."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67_45_23_01
    b0 = 0xEF_CD_AB_89
    c0 = 0x98_BA_DC_FE
    d0 = 0x10_32_54_76

    shift_amounts = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
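# Quick check (editor's addition): the digest above is expected to match
# hashlib's hex digest on simple inputs; hashlib.md5(b"hello").hexdigest() is
# "5d41402abc4b2a76b9719d911017c592", so md5_me(b"hello") should return
# b"5d41402abc4b2a76b9719d911017c592".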
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Check whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient of Hermitian matrix ``a`` and vector ``v``."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
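# Editor's check of the second test case: with A = [[1, 2, 4], [2, 3, -1], [4, -1, 1]]
# and v = [1, 2, 3], A @ v = [17, 5, 5], so v* . (A v) = 17 + 10 + 15 = 42 and
# v* . v = 14, giving a Rayleigh quotient of 42 / 14 = 3, matching the assert above.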
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
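# Usage sketch (editor's addition; `output_dir` is a placeholder):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(output_dir, metric="rouge2"),
#             get_early_stopping_callback(metric="rouge2", patience=3),
#         ]
#     )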
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
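# Usage sketch (editor's addition): round-tripping a dataset through JSON lines.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     JsonDatasetWriter(ds, "out.jsonl").write()
#     ds2 = JsonDatasetReader("out.jsonl", split="train").read()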
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __A (__magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Any = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case :Tuple = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case :List[Any] = False
snake_case :Optional[int] = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 710
|
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Return the present value of a series of cash flows, rounded to two decimals.

    The cash flow at index i is discounted by (1 + discount_rate) ** i.
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
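    # Minimal usage sketch (illustrative numbers, not from the original module):
    # three yearly cash flows of 100.0 discounted at 5% give
    # 100/1.05**0 + 100/1.05**1 + 100/1.05**2, which rounds to 285.94.
    print(present_value(0.05, [100.0, 100.0, 100.0]))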
| 10
| 0
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, filename: str, revision: Optional[str] = None) -> str:
    """Return the resolvable URL of a file stored in a Hub dataset repository."""
    if version.parse(hfh.__version__).release < version.parse('0.11.0').release:
        # old versions of hfh don't url-encode the file path
        filename = quote(filename)
    return hfh.hf_hub_url(repo_id, filename, repo_type='dataset', revision=revision)
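# Usage sketch (hypothetical repo id and file name): the helper resolves to a URL of
# the form https://huggingface.co/datasets/<repo_id>/resolve/<revision>/<filename>, e.g.
#   hf_hub_url("user/my_dataset", "data/train.csv", revision="main")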
| 474
|
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
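# For reference, the CSV consumed by Plot is the output of the benchmark scripts and
# looks like this (illustrative rows, not taken from a real run):
#   model,batch_size,sequence_length,result
#   bert-base-uncased,8,128,0.012
#   bert-base-uncased,8,512,0.044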
| 474
| 1
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCAmelCase_ = """\
Text data.
Second line of data."""
UpperCAmelCase_ = """file"""
@pytest.fixture(scope='session' )
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
_snake_case = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
_snake_case = bytes(UpperCamelCase__ , 'utf-8' )
with zstd.open(UpperCamelCase__ , 'wb' ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture
def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , UpperCamelCase__ ) , 'w' ) as f:
f.write(UpperCamelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 541
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
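# Design note: _LazyModule defers the torch-backed imports declared in _import_structure
# until an attribute is first accessed, keeping plain `import transformers` cheap.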
| 541
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt],
            image=2 * [low_res_image],
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_images_per_prompt=2,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        """Test that stable diffusion upscale works with fp16."""
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 47
|
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
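# How the quine works: the lambda receives the program's own source as `quine`;
# `quine % quine` substitutes the string's repr into its own %r placeholder, so the
# printed text reproduces the statement above (modulo the choice of quote characters).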
| 47
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for every perimeter up to max_perimeter, the number of integer
    right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1000) -> int:
    """Return the perimeter <= max_perimeter that admits the most right triangles."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
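# Sanity check (well-known small case from the Project Euler statement): p = 120
# has exactly three solutions, (20, 48, 52), (24, 45, 51) and (30, 40, 50), so
# pythagorean_triple(120)[120] == 3.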
if __name__ == "__main__":
print(F"Perimeter {solution()} has maximum solutions")
| 156
| 0
|
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a % b)."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
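# Worked trace (illustrative) for euclidean_gcd(48, 18):
#   (48, 18) -> (18, 48 % 18 = 12) -> (12, 18 % 12 = 6) -> (6, 12 % 6 = 0) -> gcd is 6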
def main():
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 263
|
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Comet(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 263
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """Output of the DeepFloyd IF pipelines: generated images plus per-image
    NSFW and watermark flags."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 470
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            '''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use OwlViTImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 470
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,)
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 88
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
_snake_case = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(F'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.')

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(F'Building TensorFlow model from configuration: {config}')
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location='cpu')
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(F'Max absolute difference between models outputs {diff}')
        assert diff <= 2e-2, F'Error, model absolute difference is >2e-2: {diff}'

    # Save pytorch-model
    print(F'Save TensorFlow model to {tf_dump_path}')
    tf_model.save_weights(tf_dump_path, save_format='h5')
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print('=' * 100)
        print(F' Converting model type {j}/{len(model_types )}: {model_type}')
        print('=' * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(F'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.')

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print('-' * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(F' Skipping finetuned checkpoint {model_shortcut_name}')
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(F' Skipping not finetuned checkpoint {model_shortcut_name}')
                continue
            print(
                F' Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}'
            )
            print('-' * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = 'converted_model'

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + '-tf_model.h5'),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
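    # Example invocation (hypothetical local paths; the flags are the ones defined above):
    #   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
    #       --pytorch_checkpoint_path ./bert/pytorch_model.bin \
    #       --config_file ./bert/config.json --tf_dump_path ./tf_dump --compare_with_pt_model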
| 282
| 0
|
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Zero-shot audio classification pipeline: scores an audio clip against
    free-form candidate labels using an audio-text model."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
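# Usage sketch (the model id is an assumption for illustration, not taken from this file):
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# returns a list of {"score", "label"} dicts sorted by descending score.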
| 266
|
'''simple docstring'''
def pancake_sort(arr: list) -> list:
    """Sort a list using pancake sort, which only uses prefix reversals."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number within the unsorted prefix
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements, moving the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
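# Worked trace (illustrative): pancake_sort([3, 1, 2])
#   cur=3: max 3 at index 0; flip prefix -> [3, 1, 2]; flip first 3 -> [2, 1, 3]
#   cur=2: max 2 at index 0; flip prefix -> [2, 1, 3]; flip first 2 -> [1, 2, 3]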
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 266
| 1
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        """Prepares text for the tokenizer and/or images for the image processor, mirroring both call APIs."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
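# Usage sketch ("facebook/flava-full" is the reference checkpoint; any FLAVA checkpoint works):
#
#     from PIL import Image
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
#     # `inputs` holds input_ids/attention_mask from the tokenizer plus pixel_values from the image processor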
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode `word` with the Baconian cipher alphabet defined above."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decode a Baconian-cipher string produced by `encode`, five symbols per letter."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
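# Round-trip example (computable from the tables above):
#     encode("hello")  -> "AABBBAABAAABABAABABAABBAB"
#     decode("AABBBAABAAABABAABABAABBAB") -> "hello"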
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Return the module path (e.g. `tests.models.bert.test_modeling_bert`) of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the module defined in `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Return the (model) tester classes defined in `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Return the test classes in `test_file` that have a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Return the model classes that appear in any test class of `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by `test_class`, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that exercise `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Return the tester classes in `test_file` that exercise `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Return a mapping from each test class in `test_file` to its tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Return a mapping from each model class in `test_file` to its test classes."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Return a mapping from each model class in `test_file` to its tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively convert classes (and containers of classes) to their names for JSON serialization."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
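# Usage sketch (run from the root of a `transformers` checkout; the test path is illustrative):
#
#     from utils.get_test_info import get_test_classes
#     get_test_classes("tests/models/bert/test_modeling_bert.py")
#     # -> the model test classes defined in that file, sorted by class name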
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""
    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to fine-tune from."""
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )
    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    main()
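# Example invocation (a sketch; the dataset name and output path are placeholders):
#
#     python run_image_classification.py \
#         --dataset_name beans \
#         --output_dir ./vit-base-beans \
#         --remove_unused_columns False \
#         --do_train --do_eval \
#         --learning_rate 2e-5 \
#         --num_train_epochs 5 \
#         --per_device_train_batch_size 8
#
# `--remove_unused_columns False` keeps the raw "image" column that the transforms above consume.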
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
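# Worked example (assumes the pytest summary format parsed by handle_test_results):
#     handle_test_results("== 2 failed, 3 passed in 0:01:02 ==")
#     # -> (2, 3, "0:01:02")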
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        # Set by `post()`; `post_reply()` checks it before threading replies.
        self.thread_ts = None
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []
        def __str__(self):
            return self.name
        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase : Tuple = get_job_links()
lowerCAmelCase : Optional[Any] = retrieve_available_artifacts()
lowerCAmelCase : Any = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase : int = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase : Optional[int] = github_actions_job_links.get("""run_doctests""")
lowerCAmelCase : List[Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
lowerCAmelCase : Any = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = handle_test_results(artifact["""stats"""])
lowerCAmelCase : List[str] = failed
lowerCAmelCase : Any = success
lowerCAmelCase : Dict = time_spent[1:-1] + """, """
lowerCAmelCase : str = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
lowerCAmelCase : Tuple = line.replace("""FAILED """, """""")
lowerCAmelCase : str = line.split()[0].replace("""\n""", """""")
if "::" in line:
lowerCAmelCase , lowerCAmelCase : Optional[int] = line.split("""::""")
else:
lowerCAmelCase , lowerCAmelCase : str = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase : str = all_failures[test] if test in all_failures else """N/A"""
lowerCAmelCase : Any = failure
break
lowerCAmelCase : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""
    def put(self, value):
        """Function that is called by `.generate()` to push new tokens."""
        raise NotImplementedError()
    def end(self):
        """Function that is called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """Simple text streamer that prints the token(s) to stdout as soon as entire words are formed."""
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        """Receives tokens, decodes them, and prints words to stdout as soon as they are complete."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)
    def end(self):
        """Flushes any remaining text in the cache and signals the end of the stream."""
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)
    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unified Ideographs Unicode blocks.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False
class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator."""
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Puts the new text in the queue. If the stream is ending, also puts a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)
    def __iter__(self):
        return self
    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
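# Usage sketch for the iterator variant (model/tokenizer names are illustrative):
#
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#     tok = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#     streamer = TextIteratorStreamer(tok, skip_prompt=True)
#     thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
#     thread.start()
#     for new_text in streamer:  # yields decoded chunks as generation progresses
#         print(new_text, end="")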
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
UpperCAmelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1_500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
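# Migration sketch: this subclass only emits a deprecation warning and forwards everything,
# so new code can construct transformers.Trainer directly with the same arguments.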
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
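# The fast tests above exercise the pipeline end to end on dummy components. Running
# a real checkpoint uses the same input dict; the model id below is DeepFloyd's
# published stage-II IF checkpoint and requires accepting their license first.
#
#   pipe = IFInpaintingSuperResolutionPipeline.from_pretrained("DeepFloyd/IF-II-L-v1.0")
#   images = pipe(**inputs).images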
| 396
| 0
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 413
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
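# A minimal usage sketch for `OnnxRuntimeModel`. The repo id is illustrative; any
# directory or hub repo containing a `model.onnx` file works, and inputs are passed
# as keyword arguments that get converted to numpy arrays in `__call__`.
#
#   model = OnnxRuntimeModel.from_pretrained("someuser/some-onnx-model", provider="CPUExecutionProvider")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))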
| 413
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the requested shape, as nested lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
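# A quick standalone sketch of what the integration test above checks: 16 kHz mono
# audio in, a (1, 1024, 128) tensor of log-mel features out (1024 frames is the
# extractor's default max_length, 128 its default number of mel bins).
#
#   extractor = ASTFeatureExtractor()
#   waveform = np.random.randn(16000).astype(np.float32)  # 1 s at 16 kHz
#   features = extractor(waveform, sampling_rate=16000, return_tensors="np")
#   features["input_values"].shape  # (1, 1024, 128)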
| 98
|
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
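# Quick hand-checkable cases for `solution`: 2**4 = 16 gives 1 + 6 = 7, and
# 2**15 = 32768 gives 3 + 2 + 7 + 6 + 8 = 26 (the Project Euler input is the
# default power=1000).
assert solution(4) == 7
assert solution(15) == 26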
| 592
| 0
|
"""simple docstring"""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """
        Every array in the list is normalized to have zero mean and unit variance.
        """
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """
        Extracts log-mel filterbank features for one waveform array (unbatched).
        """
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(speech, dtype=np.float32) for speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
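# A minimal usage sketch for the extractor above: `audio=` produces padded waveform
# inputs for the encoder, `audio_target=` produces log-mel spectrogram labels for
# the decoder (shape (1, num_frames, 80) with the default num_mel_bins=80).
#
#   extractor = SpeechT5FeatureExtractor()
#   audio = np.random.randn(16000).astype(np.float32)  # 1 s at 16 kHz
#   inputs = extractor(audio=audio, sampling_rate=16000, return_tensors="np")
#   targets = extractor(audio_target=audio, sampling_rate=16000, return_tensors="np")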
| 705
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
        import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
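# This script is meant to be run from the repository root, typically via the repo's
# consistency checks (e.g. `make repo-consistency`); a direct run would be something
# like `python utils/check_inits.py` (the utils/ location is an assumption).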
| 598
| 0
|
'''simple docstring'''
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
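# A minimal instantiation sketch: the defaults above mirror the finetuned checkpoint
# in the archive map, and any field can be overridden by keyword.
if __name__ == "__main__":
    config = BertAbsConfig(dec_layers=4, dec_dropout=0.1)
    print(config.model_type, config.dec_layers, config.dec_dropout)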
| 394
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
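# Quick sketch of the derived attribute wiring above: `num_layers` is computed from
# the switch/ext split, and `attribute_map` exposes it as `num_hidden_layers`.
#
#   config = GPTSanJapaneseConfig(num_switch_layers=4, num_ext_layers=2)
#   config.num_layers         # 6
#   config.num_hidden_layers  # 6, via attribute_map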
| 394
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
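# Hypothetical interactive check of the same round trip (file paths as written by
# setUp above):
#
#   >>> tok = CTRLTokenizer(vocab_file, merges_file, unk_token="<unk>")
#   >>> tok.tokenize("adapt react readapt apt")
#   ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt']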
| 704
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide an early-exit "highway" from an intermediate BertLayer to the classification head."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    #  We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
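# A small numeric check of `entropy` above: for a uniform logit row, the entropy of
# the implied softmax is log(num_classes), so a 1 x 4 row of zeros gives log(4).
if __name__ == "__main__":
    import math

    x = torch.zeros(1, 4)
    assert torch.allclose(entropy(x), torch.tensor([math.log(4.0)]))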
| 293
| 0
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
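# A usage sketch for the two features above; the language codes and texts are
# illustrative. `encode_example` flattens multi-translation entries into aligned
# language/translation tuples sorted by language code.
if __name__ == "__main__":
    t = Translation(languages=["en", "fr", "de"])
    print(t())  # pa.struct with one string field per sorted language

    tv = TranslationVariableLanguages(languages=["en", "fr"])
    print(tv.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))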
| 140
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def __A ( self : Dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
SCREAMING_SNAKE_CASE_ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] )
def __A ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = self.ta_base_tokenizer
SCREAMING_SNAKE_CASE_ = "Unicode €."
SCREAMING_SNAKE_CASE_ = tokenizer(__magic_name__ )
SCREAMING_SNAKE_CASE_ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"] , __magic_name__ )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__magic_name__ )
self.assertEqual(__magic_name__ , "Unicode €.</s>" )
SCREAMING_SNAKE_CASE_ = tokenizer("e è é ê ë" )
SCREAMING_SNAKE_CASE_ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"] , __magic_name__ )
# decoding
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__magic_name__ )
self.assertEqual(__magic_name__ , "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" )
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
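# A minimal usage sketch (illustrative only, assuming a `processor` built as in main() below):
#
#     data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
#     batch = data_collator([{"input_values": [0.1, 0.2], "labels": [5, 7]}])
#
# Padded label positions are set to -100, which the CTC loss in Wav2Vec2ForCTC ignores,
# so variable-length transcripts can be batched together safely.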
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )
    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
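    # Illustrative only: for transcripts covering just "abc ", the resulting vocab.json
    # would look roughly like {"a": 0, "b": 1, "c": 2, "|": 3, "[UNK]": 4, "[PAD]": 5}
    # (exact integer ids depend on set iteration order). The space character is remapped
    # to "|" because Wav2Vec2CTCTokenizer uses "|" as its word delimiter token.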
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)
    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
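# Illustrative note (not from the original file): the final line rescales pixel values
# from [0, 1] to the [-1, 1] range the VQ-VAE was trained on, e.g. 0.0 -> -1.0,
# 0.5 -> 0.0, 1.0 -> 1.0; the pipeline later inverts this with `image / 2 + 0.5`.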
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """
    A pipeline for image super-resolution using latent diffusion.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository (legacy implementation)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
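    # Illustrative only: for a repo containing a single sibling "data/train.csv",
    # dir_cache ends up as
    #     {"data/train.csv": {"name": "data/train.csv", "size": None, "type": "file"},
    #      "data": {"name": "data", "size": None, "type": "directory"}}
    # ([:-1] drops the "." parent that PurePosixPath always reports).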
    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
def combination_util(arr, n, r, index, data, i):
    """Recursively fill `data` with every combination of size `r` drawn from `arr`."""
    # Combination of size r is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combinations using the temporary array data[]
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
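# Illustrative only: each element is either included or excluded, so the recursion
# enumerates all C(n, r) subsets; the driver above prints C(5, 3) = 10 lines, starting
#     10 20 30
#     10 20 40
#     10 20 50
#     10 30 40
#     ...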
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
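# A minimal usage sketch (illustrative, not part of the original module):
#
#     config = NezhaConfig(vocab_size=21128, hidden_size=768)
#     config.save_pretrained("./nezha-config")   # writes config.json
#     config = NezhaConfig.from_pretrained("./nezha-config")
#
# Like every PretrainedConfig subclass, unknown kwargs are forwarded to the base
# class, so token ids and generation options can be set at construction time.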
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for audio diffusion.
    """

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Return the default number of inference steps for the configured scheduler."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse-step DDIM to recover the noise that generates the given images."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
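# Illustrative note (not from the original file): slerp interpolates along the great
# circle between x0 and x1, so intermediate points keep roughly constant norm, e.g.
#
#     midpoint = AudioDiffusionPipeline.slerp(noise_a, noise_b, 0.5)
#
# which is why it is preferred over plain linear interpolation when blending two
# Gaussian noise samples for diffusion models.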
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and normalize its state dict keys."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
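# Illustrative only: for a hidden size of 768, a `qkv_proj` weight has shape (2304, 768)
# and is split into three (768, 768) blocks; per the comment above, the fairseq layout
# stores them in K, V, Q order, so the first block becomes `k_proj`, not `q_proj`.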
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak model's weights to our OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)
REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)
KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    """Apply the (tf_name, hf_name) substitution patterns to a checkpoint key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
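# Illustrative only: with DECODER_PATTERNS, a TF key such as
#     "pegasus/decoder/layer_0/attention/self/query/kernel"
# becomes roughly
#     "model.decoder.layers.0.self_attn.q_proj.weight"
# since "/" -> ".", "layer_" -> "layers.", "query" -> "q_proj" and "kernel" -> "weight".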
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read raw microphone data.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone and return chunks with striding.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`, with `stride` overlap on each side.
    `stream` returns partial results even when a full `chunk_len` is not yet available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
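
# Usage sketch (hedged): decodes a local audio file into a float32 numpy array;
# "sample.wav" and the 16 kHz rate are placeholders, not part of this module.
# with open("sample.wav", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16_000)
# print(audio.dtype, audio.shape)  # float32, (num_samples,)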
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
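
# Usage sketch (hedged): the script name and all paths below are placeholders.
# python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/mobilebert_ckpt \
#     --mobilebert_config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin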
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # hedged: the original attribute name was mangled; only the `False` value is preserved

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)

        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c,
    calculates the roots for any quadratic equation of the form ax^2 + bx + c.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
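
# Example (hedged): for x^2 - 3x + 2 = 0 the roots are real, for x^2 + 4 = 0 they are complex.
# print(quadratic_roots(a=1, b=-3, c=2))  # (2.0, 1.0)
# print(quadratic_roots(a=1, b=0, c=4))   # (2j, -2j)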
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model. Output of the last layer of the model.
    """

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """
    Improved version over VectorQuantizer. Mostly avoids costly matrix multiplications and allows for post-hoc
    remapping of indices.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
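
# Usage sketch (hedged): quantizing a (B, C, H, W) latent with a small codebook;
# all sizes below are illustrative placeholders.
# vq = VectorQuantizer(n_e=16, vq_embed_dim=4, beta=0.25)
# z = torch.randn(1, 4, 8, 8)
# z_q, loss, (_, _, indices) = vq(z)
# print(z_q.shape, loss.item())  # torch.Size([1, 4, 8, 8]) and a scalar commitment loss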
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
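
# Usage sketch (hedged): encoder "moments" of shape (B, 2*C, H, W) are split
# into mean/logvar along dim=1; the tensor below is a placeholder.
# moments = torch.randn(1, 8, 4, 4)
# dist = DiagonalGaussianDistribution(moments)
# latent = dist.sample()  # shape (1, 4, 4, 4)
# kl = dist.kl()          # per-sample KL divergence to N(0, I)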
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase : Optional[int] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
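
# Usage sketch (hedged): with the default conv_stride above, the product of the
# strides is 5 * 2**6 = 320, so each output frame covers 320 input samples.
# config = SEWDConfig()
# print(config.inputs_to_logits_ratio)  # 320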
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
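
# Example (hedged): entropy of a uniform distribution over 4 outcomes is ln(4).
# print(entropy(torch.full((4,), 0.25)))  # tensor(1.3863)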
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute:
    - head attention entropy
    - head importance scores according to http://arxiv.org/abs/1905.10650
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask head (set some heads to zero), to test the effect on the network,
    based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune head (remove heads weights) based on
    the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset
    numpy_data = np.concatenate([np.loadtxt(args.data_dir, dtype=np.int64)])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
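# Example invocation (illustrative sketch; the script name, paths and model
# identifier below are placeholders, not taken from this file):
#   python run_prune_gpt.py --output_dir ./pruned --model_name_or_path gpt2 \
#       --data_dir ./tokenized_ids.txt --try_masking --masking_threshold 0.9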
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
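    # Resize rule sketch: the shorter image side is scaled to size["shortest_edge"]
    # and the longer side by the same factor, e.g. a 400x600 (w x h) image with
    # shortest_edge=18 is resized to width 18, height 27.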
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
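    # Quick self-check (illustrative): "ab" can be built either as ["ab"] or
    # as ["a", "b"], and each combination comes back in left-to-right order.
    assert sorted(all_construct("ab", ["a", "b", "ab"])) == [["a", "b"], ["ab"]]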
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
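
# Pattern note (sketch): because this placeholder class always imports cleanly,
# `from diffusers import DPMSolverSDEScheduler` succeeds even without torch or
# torchsde installed; only instantiating it (or calling `from_config` /
# `from_pretrained`) raises an informative error via `requires_backends`.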
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length)
        )
        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        inputs = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            inputs = lyr(
                inputs,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        inputs = self.decoder_norm(inputs)
        inputs = self.post_dropout(inputs)

        spec_out = self.spec_out(inputs)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states


class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer norm which only scales and doesn't shift, also known as
        # Root Mean Square Layer Normalization (https://arxiv.org/abs/1910.07467).
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
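

# Minimal shape check (illustrative sketch, not part of the module itself):
# FiLM projects a conditioning embedding to per-feature (scale, shift) pairs
# and applies x * (1 + scale) + shift, exactly as TaFiLMLayer.forward does.
if __name__ == "__main__":
    film = TaFiLMLayer(in_features=8, out_features=4)
    x = torch.randn(2, 3, 4)  # (batch, seq, features)
    cond = torch.randn(2, 3, 8)  # conditioning embedding, broadcastable to x
    print(film(x, cond).shape)  # -> torch.Size([2, 3, 4])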
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")
        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
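
# Usage note (sketch): with the lazy module installed in sys.modules, importing
# this package stays cheap; heavy torch-backed symbols such as
# `XLMRobertaXLModel` are only resolved on first attribute access.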
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A : List[str] = 16
__A : Any = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
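
# Note on the gather logic above (illustrative): with 2 processes and a
# 5-sample eval set, the last batch is padded so both ranks see the same number
# of batches; slicing with `len(eval_dataloader.dataset) - samples_seen` drops
# the duplicated tail so the metric counts each example exactly once.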
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
            json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument("--num_epochs", type=int, default=2, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)
if __name__ == "__main__":
main()
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__A = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
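# Example invocation (illustrative sketch; the script name and checkpoint
# identifiers below are placeholders, not taken from this file):
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2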
import math


def check_partition_perfect(positive_integer: int) -> bool:
    # The candidate is "perfect" when log2(sqrt(4k + 1) / 2 + 1 / 2) is an
    # exact integer, i.e. when k has the form 2**e * (2**e - 1).
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Find the smallest partition number for which the proportion of perfect
    partitions drops below `max_proportion`."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
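# Why the power-of-two test in check_partition_perfect works: the exponent
# log2(sqrt(4k + 1) / 2 + 1 / 2) is an integer e exactly when
# sqrt(4k + 1) = 2**(e + 1) - 1, i.e. 4k + 1 = (2**(e + 1) - 1) ** 2,
# which rearranges to k = 2**(2e) - 2**e = 2**e * (2**e - 1).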
|
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
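# Worked example (illustrative figures): an initial outlay of 1000 followed by
# three yearly inflows of 500, discounted at 10% per period, nets
# -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 ≈ 243.43:
#
#   present_value(0.10, [-1000.0, 500.0, 500.0, 500.0])  # -> 243.43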
if __name__ == "__main__":
import doctest
doctest.testmod()
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
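# With this lazy-import pattern, `import transformers.models.albert` stays cheap:
# the heavy torch/TF/Flax submodules listed in `_import_structure` are only
# imported on first attribute access via `_LazyModule`.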
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    # Cheap scalar fingerprint of the weights, used to detect whether a
    # checkpoint load actually changed the parameters.
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
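# Together these helpers give the tests a tiny deterministic setup: comparing
# `get_signature(model)` before and after `load_random_weights(model)` is enough
# to tell whether a checkpoint save/restore round-trip actually happened.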
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()

        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch("torch.cuda.set_device", noop), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), "cuda:64")

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}

            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)

            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)

            # random class name to verify correct one is loaded
            model.class_name = "random"

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )

        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should have `_is_accelerate_prepared` set to `True`",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(optimizer)
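# These tests follow a typical accelerate test layout; with pytest installed
# they can be run selectively, e.g. (hypothetical file name):
#
#   python -m pytest test_accelerator.py -k "save_load"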
|
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x):
    # bytes -> mebibytes
    return int(x / 2**20)


class TorchTracemalloc:
    """Track the CUDA memory allocated inside a `with` block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
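# Example run (hypothetical script name, illustrative values; needs a CUDA
# device because TorchTracemalloc reads torch.cuda allocation counters):
#
#   python peak_memory_tracking.py --model_name_or_path bert-base-cased \
#       --num_epochs 1 --output_dir ./logs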
|
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Same output as generate_pascal_triangle, but each row is built from the
    previous one in a single pass and mirrored, halving the additions."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Time both triangle generators on a range of sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
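# For reference, generate_pascal_triangle(4) and the optimized variant both
# return [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]].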
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
result == expected
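# `is_1percent_close` deliberately leaves ~1% slack on the byte counts: a
# regenerated dataset can differ slightly in serialized size (e.g. metadata or
# line-ending differences) without the content being wrong.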
|
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
UpperCAmelCase_ : Any = logging.getLogger(__name__)
UpperCAmelCase_ : List[Any] = 'pytorch_model.bin'
@dataclasses.dataclass
class __UpperCAmelCase :
'''simple docstring'''
lowercase : int = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
lowercase : Union[str, Any] = dataclasses.field(
default=_A, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )
@dataclasses.dataclass
class __UpperCAmelCase :
'''simple docstring'''
lowercase : Tuple = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
lowercase : List[str] = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
lowercase : Tuple = dataclasses.field(
default=_A, metadata={"help": "A csv or a json file containing the validation data."} )
lowercase : List[Any] = dataclasses.field(
default=_A, metadata={"help": "The name of the task to train on."}, )
lowercase : Tuple = dataclasses.field(
default=_A, metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class __UpperCAmelCase :
'''simple docstring'''
lowercase : Tuple = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
lowercase : Tuple = dataclasses.field(
default="accuracy", metadata={"help": "The evaluation metric used for the task."} )
lowercase : Optional[int] = dataclasses.field(
default="no", metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
}, )
lowercase : List[str] = dataclasses.field(
default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
lowercase : Union[str, Any] = dataclasses.field(
default=0.0, metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
}, )
lowercase : Any = dataclasses.field(
default=_A, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
lowercase : Dict = dataclasses.field(
default=_A, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
lowercase : Any = dataclasses.field(
default=_A, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
lowercase : List[str] = dataclasses.field(
default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
lowercase : Optional[Any] = dataclasses.field(
default=100, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
lowercase : Optional[Any] = dataclasses.field(
default=_A, metadata={"help": "Random seed for initialization."}, )
def _lowerCAmelCase(a : List[str] , a : str , a : Tuple , a : Tuple , a : str , a : Union[str, Any] ) -> List[str]:
_SCREAMING_SNAKE_CASE =datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_SCREAMING_SNAKE_CASE =dataset.filter(lambda a : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_SCREAMING_SNAKE_CASE =int(eval_result * len(A_ ) )
print(A_ )
_SCREAMING_SNAKE_CASE =dataset.sort('''probability''' , reverse=A_ )
_SCREAMING_SNAKE_CASE =dataset.select(range(A_ ) )
_SCREAMING_SNAKE_CASE =dataset.remove_columns(['''label''', '''probability'''] )
_SCREAMING_SNAKE_CASE =dataset.rename_column('''prediction''' , '''label''' )
_SCREAMING_SNAKE_CASE =dataset.map(lambda a : {"label": idalabel[example["label"]]} )
_SCREAMING_SNAKE_CASE =dataset.shuffle(seed=args.seed )
_SCREAMING_SNAKE_CASE =os.path.join(A_ , f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(A_ , index=A_ )
else:
dataset.to_json(A_ )
def _lowerCAmelCase(a : Optional[Any] , a : str , a : Any , a : List[str] , **a : Any ) -> List[Any]:
_SCREAMING_SNAKE_CASE =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_SCREAMING_SNAKE_CASE =STModelArguments(model_name_or_path=A_ )
_SCREAMING_SNAKE_CASE =STDataArguments(train_file=A_ , infer_file=A_ )
_SCREAMING_SNAKE_CASE =STTrainingArguments(output_dir=A_ )
_SCREAMING_SNAKE_CASE =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(A_ ).items():
setattr(A_ , A_ , A_ )
for key, value in kwargs.items():
if hasattr(A_ , A_ ):
setattr(A_ , A_ , A_ )
# Sanity checks
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_SCREAMING_SNAKE_CASE =args.train_file
_SCREAMING_SNAKE_CASE =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_SCREAMING_SNAKE_CASE =args.eval_file
for key in data_files:
_SCREAMING_SNAKE_CASE =data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_SCREAMING_SNAKE_CASE =extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
_SCREAMING_SNAKE_CASE =f"""{args.output_dir}/self-train_iter-{{}}""".format
_SCREAMING_SNAKE_CASE =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A_ )
os.makedirs(A_ , exist_ok=A_ )
accelerator.wait_for_everyone()
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =False
# Show the progress bar
_SCREAMING_SNAKE_CASE =tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_SCREAMING_SNAKE_CASE =data_dir_format(A_ )
assert os.path.exists(A_ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_SCREAMING_SNAKE_CASE =os.path.join(A_ , '''stage-1''' )
_SCREAMING_SNAKE_CASE ={
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A_ , A_ ):
arguments_dict.update({key: value} )
_SCREAMING_SNAKE_CASE =os.path.join(A_ , '''best-checkpoint''' , A_ )
if os.path.exists(A_ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , A_ , A_ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , A_ )
finetune(**A_ )
accelerator.wait_for_everyone()
assert os.path.exists(A_ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , A_ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_SCREAMING_SNAKE_CASE =os.path.join(A_ , '''best-checkpoint''' )
_SCREAMING_SNAKE_CASE =os.path.join(A_ , '''stage-2''' )
# Update arguments_dict
_SCREAMING_SNAKE_CASE =model_path
_SCREAMING_SNAKE_CASE =data_files['''train''']
_SCREAMING_SNAKE_CASE =current_output_dir
_SCREAMING_SNAKE_CASE =os.path.join(A_ , '''best-checkpoint''' , A_ )
if os.path.exists(A_ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , A_ , A_ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , A_ )
finetune(**A_ )
accelerator.wait_for_everyone()
assert os.path.exists(A_ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , A_ )
_SCREAMING_SNAKE_CASE =iteration
_SCREAMING_SNAKE_CASE =data_dir_format(iteration + 1 )
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(os.path.join(A_ , '''best-checkpoint''' ) )
_SCREAMING_SNAKE_CASE =config.idalabel
_SCREAMING_SNAKE_CASE =os.path.join(A_ , '''eval_results_best-checkpoint.json''' )
_SCREAMING_SNAKE_CASE =os.path.join(A_ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(A_ )
with open(A_ , '''r''' ) as f:
_SCREAMING_SNAKE_CASE =float(json.load(A_ )[args.eval_metric] )
_SCREAMING_SNAKE_CASE =os.path.join(A_ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(A_ )
# Loading the dataset from local csv or json files.
_SCREAMING_SNAKE_CASE =load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
_SCREAMING_SNAKE_CASE =load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(A_ , exist_ok=A_ )
shutil.copy(A_ , os.path.join(A_ , f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(A_ ):
shutil.copy(A_ , os.path.join(A_ , f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(A_ , A_ , A_ , A_ , A_ , A_ )
accelerator.wait_for_everyone()
_SCREAMING_SNAKE_CASE =os.path.join(A_ , f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_SCREAMING_SNAKE_CASE =eval_result
if best_iteration is None:
_SCREAMING_SNAKE_CASE =new_iteration
_SCREAMING_SNAKE_CASE =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_SCREAMING_SNAKE_CASE =new_iteration
_SCREAMING_SNAKE_CASE =new_eval_result
_SCREAMING_SNAKE_CASE =0
else:
if new_eval_result == best_eval_result:
_SCREAMING_SNAKE_CASE =new_iteration
_SCREAMING_SNAKE_CASE =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_SCREAMING_SNAKE_CASE =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , A_ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A_ , f"""eval_results_iter-{iteration}.json""" ) , os.path.join(A_ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A_ , f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(A_ , '''eval_results_best-iteration.json''' ) , )
|
"""simple docstring"""
def _lowerCAmelCase(a : list ) -> int:
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_SCREAMING_SNAKE_CASE =grid[0]
for row_n in range(1 , len(a ) ):
_SCREAMING_SNAKE_CASE =grid[row_n]
_SCREAMING_SNAKE_CASE =fill_row(a , a )
_SCREAMING_SNAKE_CASE =grid[row_n]
return grid[-1][-1]
def _lowerCAmelCase(a : list , a : list ) -> list:
current_row[0] += row_above[0]
for cell_n in range(1 , len(a ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
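# Worked example: for grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# top-left -> bottom-right path is 1 -> 3 -> 1 -> 1 -> 1, so min_path_sum
# returns 7.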
if __name__ == "__main__":
import doctest
doctest.testmod()
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class snake_case :
def __init__( self : int)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = {}
def lowercase_ ( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=1)-> Dict:
'''simple docstring'''
if self.graph.get(UpperCamelCase__):
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
__lowerCAmelCase: List[Any] = [[w, v]]
if not self.graph.get(UpperCamelCase__):
__lowerCAmelCase: str = []
def lowercase_ ( self : Dict)-> Any:
'''simple docstring'''
return list(self.graph)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any])-> str:
'''simple docstring'''
if self.graph.get(UpperCamelCase__):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__)
def lowercase_ ( self : Dict , UpperCamelCase__ : Union[str, Any]=-2 , UpperCamelCase__ : int=-1)-> Optional[Any]:
'''simple docstring'''
if s == d:
return []
__lowerCAmelCase: List[Any] = []
__lowerCAmelCase: str = []
if s == -2:
__lowerCAmelCase: Optional[Any] = list(self.graph)[0]
stack.append(UpperCamelCase__)
visited.append(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
__lowerCAmelCase: int = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(UpperCamelCase__)
return visited
else:
stack.append(node[1])
visited.append(node[1])
__lowerCAmelCase: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__) != 0:
__lowerCAmelCase: List[Any] = stack[len(UpperCamelCase__) - 1]
else:
__lowerCAmelCase: List[Any] = ss
# check if se have reached the starting point
if len(UpperCamelCase__) == 0:
return visited
def lowercase_ ( self : int , UpperCamelCase__ : str=-1)-> Any:
'''simple docstring'''
if c == -1:
__lowerCAmelCase: List[str] = floor(random() * 1_0_0_0_0) + 1_0
for i in range(UpperCamelCase__):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2) + 1):
__lowerCAmelCase: List[Any] = floor(random() * c) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Optional[int]=-2)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Tuple = deque()
__lowerCAmelCase: Any = []
if s == -2:
__lowerCAmelCase: int = list(self.graph)[0]
d.append(UpperCamelCase__)
visited.append(UpperCamelCase__)
while d:
__lowerCAmelCase: Union[str, Any] = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def lowercase_ ( self : int , UpperCamelCase__ : Union[str, Any])-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowercase_ ( self : Tuple , UpperCamelCase__ : int)-> List[Any]:
'''simple docstring'''
return len(self.graph[u])
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : str=-2)-> int:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = []
__lowerCAmelCase: int = []
if s == -2:
__lowerCAmelCase: Optional[int] = list(self.graph)[0]
stack.append(UpperCamelCase__)
visited.append(UpperCamelCase__)
__lowerCAmelCase: str = s
__lowerCAmelCase: Tuple = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
__lowerCAmelCase: Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__lowerCAmelCase: Any = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop())
if len(UpperCamelCase__) != 0:
__lowerCAmelCase: Union[str, Any] = stack[len(UpperCamelCase__) - 1]
else:
__lowerCAmelCase: str = ss
# check if se have reached the starting point
if len(UpperCamelCase__) == 0:
return sorted_nodes
def lowercase_ ( self : Union[str, Any])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = []
__lowerCAmelCase: Dict = []
__lowerCAmelCase: int = list(self.graph)[0]
stack.append(UpperCamelCase__)
visited.append(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = -2
__lowerCAmelCase: Any = []
__lowerCAmelCase: int = s
__lowerCAmelCase: List[str] = False
__lowerCAmelCase: Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
__lowerCAmelCase: Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__lowerCAmelCase: List[Any] = len(UpperCamelCase__) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__lowerCAmelCase: Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCAmelCase: Dict = True
if len(UpperCamelCase__) != 0:
__lowerCAmelCase: Optional[Any] = stack[len(UpperCamelCase__) - 1]
else:
__lowerCAmelCase: Union[str, Any] = False
indirect_parents.append(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = s
__lowerCAmelCase: List[Any] = ss
# check if se have reached the starting point
if len(UpperCamelCase__) == 0:
return list(UpperCamelCase__)
def lowercase_ ( self : Tuple)-> str:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = []
__lowerCAmelCase: List[Any] = []
__lowerCAmelCase: Union[str, Any] = list(self.graph)[0]
stack.append(UpperCamelCase__)
visited.append(UpperCamelCase__)
__lowerCAmelCase: List[str] = -2
__lowerCAmelCase: Union[str, Any] = []
__lowerCAmelCase: int = s
__lowerCAmelCase: Any = False
__lowerCAmelCase: Optional[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
__lowerCAmelCase: Any = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__lowerCAmelCase: str = len(UpperCamelCase__) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__lowerCAmelCase: Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCAmelCase: List[Any] = True
if len(UpperCamelCase__) != 0:
__lowerCAmelCase: Tuple = stack[len(UpperCamelCase__) - 1]
else:
__lowerCAmelCase: int = False
indirect_parents.append(UpperCamelCase__)
__lowerCAmelCase: List[str] = s
__lowerCAmelCase: Tuple = ss
# check if se have reached the starting point
if len(UpperCamelCase__) == 0:
return False
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Tuple=-2 , UpperCamelCase__ : Dict=-1)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: str = time()
self.dfs(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = time()
return end - begin
def lowercase_ ( self : List[str] , UpperCamelCase__ : Optional[int]=-2)-> Dict:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = time()
self.bfs(UpperCamelCase__)
__lowerCAmelCase: str = time()
return end - begin
class snake_case :
def __init__( self : Any)-> int:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = {}
def lowercase_ ( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any]=1)-> List[str]:
'''simple docstring'''
if self.graph.get(UpperCamelCase__):
# if there already is a edge
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
# if u does not exist
__lowerCAmelCase: Union[str, Any] = [[w, v]]
# add the other way
if self.graph.get(UpperCamelCase__):
# if there already is a edge
if self.graph[v].count([w, u]) == 0:
self.graph[v].append([w, u])
else:
# if u does not exist
__lowerCAmelCase: Dict = [[w, u]]
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int)-> str:
'''simple docstring'''
if self.graph.get(UpperCamelCase__):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(UpperCamelCase__)
# the other way round
if self.graph.get(UpperCamelCase__):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(UpperCamelCase__)
def lowercase_ ( self : List[Any] , UpperCamelCase__ : Union[str, Any]=-2 , UpperCamelCase__ : Any=-1)-> Dict:
'''simple docstring'''
if s == d:
return []
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: Optional[Any] = []
if s == -2:
__lowerCAmelCase: str = list(self.graph)[0]
stack.append(UpperCamelCase__)
visited.append(UpperCamelCase__)
__lowerCAmelCase: Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
__lowerCAmelCase: str = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(UpperCamelCase__)
return visited
else:
stack.append(node[1])
visited.append(node[1])
__lowerCAmelCase: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(UpperCamelCase__) != 0:
__lowerCAmelCase: Tuple = stack[len(UpperCamelCase__) - 1]
else:
__lowerCAmelCase: Tuple = ss
# check if se have reached the starting point
if len(UpperCamelCase__) == 0:
return visited
def lowercase_ ( self : List[Any] , UpperCamelCase__ : Tuple=-1)-> List[Any]:
'''simple docstring'''
if c == -1:
__lowerCAmelCase: Dict = floor(random() * 1_0_0_0_0) + 1_0
for i in range(UpperCamelCase__):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2) + 1):
__lowerCAmelCase: Dict = floor(random() * c) + 1
if n != i:
self.add_pair(UpperCamelCase__ , UpperCamelCase__ , 1)
def lowercase_ ( self : Any , UpperCamelCase__ : List[Any]=-2)-> Any:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = deque()
__lowerCAmelCase: List[str] = []
if s == -2:
__lowerCAmelCase: Optional[Any] = list(self.graph)[0]
d.append(UpperCamelCase__)
visited.append(UpperCamelCase__)
while d:
__lowerCAmelCase: int = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : List[Any])-> List[Any]:
'''simple docstring'''
return len(self.graph[u])
def lowercase_ ( self : Any)-> Tuple:
'''simple docstring'''
__lowerCAmelCase: Any = []
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: List[Any] = list(self.graph)[0]
stack.append(UpperCamelCase__)
visited.append(UpperCamelCase__)
__lowerCAmelCase: List[str] = -2
__lowerCAmelCase: Union[str, Any] = []
__lowerCAmelCase: Union[str, Any] = s
__lowerCAmelCase: List[str] = False
__lowerCAmelCase: Any = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s]) != 0:
__lowerCAmelCase: List[str] = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__lowerCAmelCase: Dict = len(UpperCamelCase__) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__lowerCAmelCase: Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCAmelCase: Tuple = True
if len(UpperCamelCase__) != 0:
__lowerCAmelCase: Union[str, Any] = stack[len(UpperCamelCase__) - 1]
else:
__lowerCAmelCase: Any = False
indirect_parents.append(UpperCamelCase__)
__lowerCAmelCase: Dict = s
__lowerCAmelCase: Optional[int] = ss
            # check if we have reached the starting point
if len(UpperCamelCase__) == 0:
return list(UpperCamelCase__)
    def has_cycle(self):
'''simple docstring'''
__lowerCAmelCase: Dict = []
__lowerCAmelCase: str = []
__lowerCAmelCase: str = list(self.graph)[0]
stack.append(UpperCamelCase__)
visited.append(UpperCamelCase__)
__lowerCAmelCase: List[str] = -2
__lowerCAmelCase: List[Any] = []
__lowerCAmelCase: str = s
__lowerCAmelCase: Tuple = False
__lowerCAmelCase: Optional[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
__lowerCAmelCase: int = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
__lowerCAmelCase: str = len(UpperCamelCase__) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
__lowerCAmelCase: int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
__lowerCAmelCase: Any = True
if len(UpperCamelCase__) != 0:
__lowerCAmelCase: Optional[Any] = stack[len(UpperCamelCase__) - 1]
else:
__lowerCAmelCase: Optional[int] = False
indirect_parents.append(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = s
__lowerCAmelCase: Any = ss
            # check if we have reached the starting point
if len(UpperCamelCase__) == 0:
return False
    def all_nodes(self):
'''simple docstring'''
return list(self.graph)
    def dfs_time(self, s = -2, e = -1):
        '''simple docstring'''
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin
    def bfs_time(self, s = -2):
        '''simple docstring'''
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
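# A minimal, readable sketch of the breadth-first traversal implemented by the
# class above, written against a plain adjacency-list dict whose values are
# [weight, neighbor] pairs (mirroring self.graph). The function name and the
# standalone form are illustrative assumptions, not part of the original.
def bfs_sketch(adjacency, start):
    from collections import deque

    queue = deque([start])
    seen = [start]  # visit order, like the method's `visited` list
    while queue:
        node = queue.popleft()
        for _weight, neighbor in adjacency.get(node, []):
            if neighbor not in seen:
                seen.append(neighbor)
                queue.append(neighbor)
    return seen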
| 346
|
"""simple docstring"""
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get the monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 because payments are monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
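# A worked example of the formula above (values are illustrative): for
# principal = 10_000, rate_per_annum = 0.10 and years_to_repay = 5, the monthly
# rate is 0.10 / 12 and number_of_payments is 60, giving
# EMI = 10_000 * (0.10 / 12) * (1 + 0.10 / 12) ** 60 / ((1 + 0.10 / 12) ** 60 - 1)
#     ≈ 212.47 per month.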
| 346
| 1
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory-expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item, main_target) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_a, parent_b) -> tuple[str, str]:
    # single-point crossover: split both parents at one random index and swap the tails
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
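# A worked example of the single-point crossover above (assumed inputs): with
# parents "abcd" and "wxyz" and random_slice == 2, the children are
# "ab" + "yz" == "abyz" and "wx" + "cd" == "wxcd".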
def mutate(child, genes) -> str:
    child_list = list(child)
    # replace one random gene with probability MUTATION_PROBABILITY
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child_list) - 1)] = random.choice(genes)
    return "".join(child_list)
def select(parent_a, population_score, genes) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_b = population_score[random.randint(0, N_SELECTED)][0]
        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new string to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def basic(target, genes, debug = True) -> tuple[int, int, str]:
    # Verify that N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside the genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population: {total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}')
        # Flush the old population, keeping some of the best evolutions.
        # Keeping them avoids regression of the evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is the selection step.
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small
            # strings in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
    )
    genes_list = list(
        """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
        """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
    )
    generation, population, target = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 709
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowerCamelCase = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class _a ( unittest.TestCase ):
'''simple docstring'''
def _A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None ):
"""simple docstring"""
a__ : Optional[Any] = None
a__ : Dict = os.path.abspath(os.path.join("examples" , "by_feature" ) )
a__ : Tuple = os.path.abspath("examples" )
for item in os.listdir(__UpperCAmelCase ):
if item not in EXCLUDE_EXAMPLES:
a__ : Optional[int] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
if os.path.isfile(__UpperCAmelCase ) and ".py" in item_path:
with self.subTest(
tested_script=__UpperCAmelCase , feature_script=__UpperCAmelCase , tested_section="main()" if parser_only else "training_function()" , ):
a__ : List[Any] = compare_against_test(
os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a__ : Optional[Any] = "\n".join(__UpperCAmelCase )
if special_strings is not None:
for string in special_strings:
a__ : Optional[Any] = diff.replace(__UpperCAmelCase , "" )
self.assertEqual(__UpperCAmelCase , "" )
def _A ( self ):
"""simple docstring"""
self.one_complete_example("complete_nlp_example.py" , __UpperCAmelCase )
self.one_complete_example("complete_nlp_example.py" , __UpperCAmelCase )
def _A ( self ):
"""simple docstring"""
a__ : List[Any] = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
a__ : int = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.one_complete_example("complete_cv_example.py" , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A :int = False
@classmethod
def _A ( cls ):
"""simple docstring"""
super().setUpClass()
a__ : Optional[Any] = tempfile.mkdtemp()
a__ : Tuple = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
a__ : Any = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def _A ( cls ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _A ( self ):
"""simple docstring"""
a__ : Dict = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def _A ( self ):
"""simple docstring"""
a__ : Union[str, Any] = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
a__ : Optional[int] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def _A ( self ):
"""simple docstring"""
a__ : Dict = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
a__ : List[str] = run_command(self._launch_args + testargs , return_stdout=__UpperCAmelCase )
self.assertNotIn("epoch 0:" , __UpperCAmelCase )
self.assertIn("epoch 1:" , __UpperCAmelCase )
def _A ( self ):
"""simple docstring"""
a__ : List[Any] = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
a__ : List[str] = run_command(self._launch_args + testargs , return_stdout=__UpperCAmelCase )
if torch.cuda.is_available():
a__ : List[str] = torch.cuda.device_count()
else:
a__ : str = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , __UpperCAmelCase )
self.assertIn("epoch 1:" , __UpperCAmelCase )
else:
self.assertIn("epoch 0:" , __UpperCAmelCase )
self.assertIn("epoch 1:" , __UpperCAmelCase )
@slow
def _A ( self ):
"""simple docstring"""
a__ : List[str] = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
a__ : Any = run_command(self._launch_args + testargs , return_stdout=__UpperCAmelCase )
a__ : List[str] = re.findall("({.+})" , __UpperCAmelCase )
a__ : str = [r for r in results if "accuracy" in r][-1]
a__ : Optional[int] = ast.literal_eval(__UpperCAmelCase )
self.assertGreaterEqual(results["accuracy"] , 0.7_5 )
def _A ( self ):
"""simple docstring"""
a__ : Any = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def _A ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
a__ : Tuple = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , "tracking" ) ) )
def _A ( self ):
"""simple docstring"""
a__ : List[str] = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def _A ( self ):
"""simple docstring"""
a__ : Optional[int] = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 207
| 0
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def snake_case__ ( UpperCamelCase ) -> int:
monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''' ,set() )
@pytest.fixture
def snake_case__ ( UpperCamelCase ) -> Tuple:
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _snake_case ) -> str:
_UpperCamelCase : Dict = metric_id
class UpperCAmelCase :
"""simple docstring"""
A__ : Tuple = [MetricMock(a_ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]
def _lowercase ( self ) -> List[Any]:
return self._metrics
monkeypatch.setattr('''datasets.inspect.huggingface_hub''' ,HfhMock() )
@pytest.mark.parametrize(
'''func, args''' ,[(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] )
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> Dict:
if "tmp_path" in args:
_UpperCamelCase : Any = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
with pytest.warns(UpperCamelCase ,match='''https://huggingface.co/docs/evaluate''' ):
func(*UpperCamelCase )
| 683
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( a_ ):
"""simple docstring"""
A__ : str = ['image_processor', 'tokenizer']
A__ : Dict = 'CLIPImageProcessor'
A__ : str = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> List[Any]:
_UpperCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _snake_case , )
_UpperCamelCase : Optional[Any] = kwargs.pop('''feature_extractor''' )
_UpperCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case , _snake_case )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def _lowercase ( self ) -> int:
_UpperCamelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
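# A minimal usage sketch (illustrative; assumes an instantiated processor with
# a concrete tokenizer and image processor):
#
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#
# Text-plus-image calls merge pixel_values into the token encoding; image-only
# calls return a BatchEncoding built from the image features, as above.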
| 683
| 1
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 228
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Union[List[PIL.Image.Image], np.ndarray]
_snake_case : Optional[List[bool]]
_snake_case : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 228
| 1
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class A ( __lowercase ):
_snake_case =DistilBertTokenizer
_snake_case =DistilBertTokenizerFast
_snake_case =True
@slow
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase_ =tokenizer.encode("sequence builders" , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode("multi-sequence build" , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
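# For reference, the layouts asserted above follow the standard BERT special
# token convention: [CLS] A [SEP] for single sequences and
# [CLS] A [SEP] B [SEP] for pairs.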
| 54
|
import sys
def matrix_chain_order(array):
    '''simple docstring'''
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
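# For reference, the recurrence computed above: for the chain A_a ... A_b with
# dimension array p (A_i is p[i-1] x p[i]), the minimal multiplication cost is
#   matrix[a][b] = min over c in [a, b) of
#                  matrix[a][c] + matrix[c + 1][b] + p[a - 1] * p[c] * p[b]
# with matrix[a][a] = 0; sol[a][b] records the split c attaining the minimum.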
def print_optimal_solution(optimal_solution, i, j):
    '''simple docstring'''
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrices created from the above array:
    # 30x35, 35x15, 15x5, 5x10, 10x20, 20x25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
| 54
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
class A__ ( A ):
"""simple docstring"""
_lowercase : List[Any] = ['''pixel_values''']
def __init__( self : Tuple , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BILINEAR , A_ : bool = True , A_ : Union[int, float] = 1 / 2_5_5 , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , **A_ : List[Any] , ):
'''simple docstring'''
super().__init__(**A_ )
_lowerCAmelCase : Optional[int] = size if size is not None else {"shortest_edge": 2_2_4}
_lowerCAmelCase : Optional[int] = get_size_dict(A_ , default_to_square=A_ )
_lowerCAmelCase : Any = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
_lowerCAmelCase : str = get_size_dict(A_ , param_name="crop_size" )
_lowerCAmelCase : Any = do_resize
_lowerCAmelCase : Optional[Any] = size
_lowerCAmelCase : str = resample
_lowerCAmelCase : Optional[Any] = do_rescale
_lowerCAmelCase : Dict = rescale_factor
_lowerCAmelCase : Any = do_center_crop
_lowerCAmelCase : List[Any] = crop_size
_lowerCAmelCase : List[Any] = do_flip_channel_order
def __magic_name__ ( self : Tuple , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PIL.Image.BILINEAR , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Dict , ):
'''simple docstring'''
_lowerCAmelCase : Any = get_size_dict(A_ , default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(A_ , size=size["shortest_edge"] , default_to_square=A_ )
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def __magic_name__ ( self : Union[str, Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : List[str] , ):
'''simple docstring'''
_lowerCAmelCase : str = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(A_ , size=(size["height"], size["width"]) , data_format=A_ , **A_ )
def __magic_name__ ( self : Tuple , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ):
'''simple docstring'''
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def __magic_name__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
return flip_channel_order(A_ , data_format=A_ )
def __magic_name__ ( self : List[Any] , A_ : ImageInput , A_ : bool = None , A_ : Dict[str, int] = None , A_ : PILImageResampling = None , A_ : bool = None , A_ : float = None , A_ : bool = None , A_ : Dict[str, int] = None , A_ : bool = None , A_ : Optional[Union[str, TensorType]] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : Tuple , ):
'''simple docstring'''
_lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Dict = resample if resample is not None else self.resample
_lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : str = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_lowerCAmelCase : str = size if size is not None else self.size
_lowerCAmelCase : str = get_size_dict(A_ , default_to_square=A_ )
_lowerCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : List[str] = get_size_dict(A_ , param_name="crop_size" )
_lowerCAmelCase : Dict = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
# All transformations expect numpy arrays.
_lowerCAmelCase : Optional[int] = [to_numpy_array(A_ ) for image in images]
if do_resize:
_lowerCAmelCase : Any = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
_lowerCAmelCase : Tuple = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
_lowerCAmelCase : Any = [self.rescale(image=A_ , scale=A_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_lowerCAmelCase : Dict = [self.flip_channel_order(image=A_ ) for image in images]
_lowerCAmelCase : Optional[Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
_lowerCAmelCase : Tuple = {"pixel_values": images}
return BatchFeature(data=A_ , tensor_type=A_ )
def __magic_name__ ( self : List[Any] , A_ : Union[str, Any] , A_ : List[Tuple] = None ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(A_ ):
_lowerCAmelCase : Dict = target_sizes.numpy()
_lowerCAmelCase : List[Any] = []
for idx in range(len(A_ ) ):
_lowerCAmelCase : Tuple = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=A_ )
_lowerCAmelCase : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
_lowerCAmelCase : Tuple = logits.argmax(dim=1 )
_lowerCAmelCase : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
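# A minimal numpy sketch of the channel-order flip used in the preprocessing
# above (the pretrained checkpoints expect BGR rather than RGB). The function
# name and the channels-last layout are illustrative assumptions.
def flip_channel_order_sketch(image: np.ndarray) -> np.ndarray:
    # reverse the channel axis: RGB -> BGR (and back)
    return image[..., ::-1]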
| 503
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ ( A ):
"""simple docstring"""
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A_ , "embed_dim" ) )
self.parent.assertTrue(hasattr(A_ , "num_heads" ) )
class A__ :
"""simple docstring"""
def __init__( self : Tuple , A_ : int , A_ : Dict=1_3 , A_ : int=6_4 , A_ : str=3 , A_ : Optional[int]=[1_6, 4_8, 9_6] , A_ : int=[1, 3, 6] , A_ : Optional[int]=[1, 2, 1_0] , A_ : Any=[7, 3, 3] , A_ : Tuple=[4, 2, 2] , A_ : str=[2, 1, 1] , A_ : Optional[Any]=[2, 2, 2] , A_ : Union[str, Any]=[False, False, True] , A_ : Union[str, Any]=[0.0, 0.0, 0.0] , A_ : Any=0.02 , A_ : Optional[int]=1E-12 , A_ : str=True , A_ : List[Any]=True , A_ : Union[str, Any]=2 , ):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : Optional[Any] = patch_sizes
_lowerCAmelCase : Optional[int] = patch_stride
_lowerCAmelCase : List[str] = patch_padding
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : Dict = num_labels
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = num_heads
_lowerCAmelCase : Union[str, Any] = stride_kv
_lowerCAmelCase : Tuple = depth
_lowerCAmelCase : List[str] = cls_token
_lowerCAmelCase : Tuple = attention_drop_rate
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : int = None
if self.use_labels:
# create a random int32 tensor of given shape
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Union[str, Any] , A_ : str , A_ : Optional[int] , A_ : str ):
'''simple docstring'''
_lowerCAmelCase : str = TFCvtModel(config=A_ )
_lowerCAmelCase : str = model(A_ , training=A_ )
_lowerCAmelCase : List[Any] = (self.image_size, self.image_size)
_lowerCAmelCase , _lowerCAmelCase : Any = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_lowerCAmelCase : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_lowerCAmelCase : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __magic_name__ ( self : Any , A_ : Tuple , A_ : int , A_ : Any ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : Optional[Any] = TFCvtForImageClassification(A_ )
_lowerCAmelCase : Optional[Any] = model(A_ , labels=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = config_and_inputs
_lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( A , A , unittest.TestCase ):
"""simple docstring"""
_lowercase : Any = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
_lowercase : Tuple = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
_lowercase : Optional[int] = False
_lowercase : Any = False
_lowercase : int = False
_lowercase : int = False
_lowercase : Tuple = False
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFCvtModelTester(self )
_lowerCAmelCase : Optional[Any] = TFCvtConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=3_7 )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def __magic_name__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __magic_name__ ( self : int ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __magic_name__ ( self : Dict ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(A_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def __magic_name__ ( self : str ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = model_class(A_ )
_lowerCAmelCase : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __magic_name__ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(A_ : Any , A_ : Any , A_ : Dict ):
_lowerCAmelCase : Tuple = model_class(A_ )
_lowerCAmelCase : List[Any] = model(**self._prepare_for_class(A_ , A_ ) )
_lowerCAmelCase : Tuple = outputs.hidden_states
_lowerCAmelCase : Optional[Any] = len(self.model_tester.depth )
self.assertEqual(len(A_ ) , A_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : List[str] = True
check_hidden_states_output(A_ , A_ , A_ )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def __magic_name__ ( self : Dict ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFCvtModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _snake_case ( ) -> Dict:
"""simple docstring"""
_lowerCAmelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ ( self : str ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __magic_name__ ( self : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : int = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : Optional[Any] = image_processor(images=A_ , return_tensors="tf" )
# forward pass
_lowerCAmelCase : str = model(**A_ )
# verify the logits
_lowerCAmelCase : List[str] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A_ )
_lowerCAmelCase : Any = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A_ , atol=1E-4 ) )
| 503
| 1
|
import random
class _UpperCamelCase :
'''simple docstring'''
    @staticmethod
    def encrypt(text: str):
        '''simple docstring'''
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]):
        '''simple docstring'''
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
__UpperCamelCase , __UpperCamelCase : Any = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
| 519
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int , _lowerCamelCase : Collection[float] | None = None ):
'''simple docstring'''
if components is None:
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : Dict = list(_lowerCamelCase )
def __len__( self : int ):
'''simple docstring'''
return len(self.__components )
def __str__( self : Any ):
'''simple docstring'''
return "(" + ",".join(map(_lowerCamelCase , self.__components ) ) + ")"
def __add__( self : Union[str, Any] , _lowerCamelCase : Vector ):
'''simple docstring'''
__lowerCamelCase : Any = len(self )
if size == len(_lowerCamelCase ):
__lowerCamelCase : List[str] = [self.__components[i] + other.component(_lowerCamelCase ) for i in range(_lowerCamelCase )]
return Vector(_lowerCamelCase )
else:
raise Exception("""must have the same size""" )
def __sub__( self : List[str] , _lowerCamelCase : Vector ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = len(self )
if size == len(_lowerCamelCase ):
__lowerCamelCase : Tuple = [self.__components[i] - other.component(_lowerCamelCase ) for i in range(_lowerCamelCase )]
return Vector(_lowerCamelCase )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self : Tuple , _lowerCamelCase : float ):
'''simple docstring'''
...
@overload
def __mul__( self : str , _lowerCamelCase : Vector ):
'''simple docstring'''
...
def __mul__( self : List[str] , _lowerCamelCase : float | Vector ):
'''simple docstring'''
if isinstance(_lowerCamelCase , (float, int) ):
__lowerCamelCase : List[Any] = [c * other for c in self.__components]
return Vector(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and len(self ) == len(_lowerCamelCase ):
__lowerCamelCase : List[str] = len(self )
__lowerCamelCase : int = [self.__components[i] * other.component(_lowerCamelCase ) for i in range(_lowerCamelCase )]
return sum(_lowerCamelCase )
else: # error case
raise Exception("""invalid operand!""" )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return Vector(self.__components )
def _snake_case ( self : Optional[Any] , _lowerCamelCase : int ):
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def _snake_case ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : float ):
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
__lowerCamelCase : Any = value
def _snake_case ( self : Tuple ):
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
__lowerCamelCase : Union[str, Any] = [c**2 for c in self.__components]
return math.sqrt(sum(_lowerCamelCase ) )
def _snake_case ( self : Dict , _lowerCamelCase : Vector , _lowerCamelCase : bool = False ):
'''simple docstring'''
__lowerCamelCase : List[str] = self * other
__lowerCamelCase : List[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _UpperCAmelCase ( UpperCAmelCase : int ):
"""simple docstring"""
assert isinstance(UpperCAmelCase , UpperCAmelCase )
return Vector([0] * dimension )
def _UpperCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : int ):
"""simple docstring"""
assert isinstance(UpperCAmelCase , UpperCAmelCase ) and (isinstance(UpperCAmelCase , UpperCAmelCase ))
__lowerCamelCase : Optional[Any] = [0] * dimension
__lowerCamelCase : Union[str, Any] = 1
return Vector(UpperCAmelCase )
def _UpperCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : Vector , UpperCAmelCase : Vector ):
"""simple docstring"""
assert (
isinstance(UpperCAmelCase , UpperCAmelCase )
and isinstance(UpperCAmelCase , UpperCAmelCase )
and (isinstance(UpperCAmelCase , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
"""simple docstring"""
random.seed(UpperCAmelCase )
__lowerCamelCase : str = [random.randint(UpperCAmelCase , UpperCAmelCase ) for _ in range(UpperCAmelCase )]
return Vector(UpperCAmelCase )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCamelCase : list[list[float]] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
__lowerCamelCase : str = matrix
__lowerCamelCase : Optional[int] = w
__lowerCamelCase : List[Any] = h
def __str__( self : Any ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Any , _lowerCamelCase : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__lowerCamelCase : int = []
for i in range(self.__height ):
__lowerCamelCase : str = [
self.__matrix[i][j] + other.component(_lowerCamelCase , _lowerCamelCase )
for j in range(self.__width )
]
matrix.append(_lowerCamelCase )
return Matrix(_lowerCamelCase , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self : str , _lowerCamelCase : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__lowerCamelCase : List[Any] = []
for i in range(self.__height ):
__lowerCamelCase : List[Any] = [
self.__matrix[i][j] - other.component(_lowerCamelCase , _lowerCamelCase )
for j in range(self.__width )
]
matrix.append(_lowerCamelCase )
return Matrix(_lowerCamelCase , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self : List[Any] , _lowerCamelCase : float ):
'''simple docstring'''
...
@overload
def __mul__( self : Optional[int] , _lowerCamelCase : Vector ):
'''simple docstring'''
...
def __mul__( self : Optional[Any] , _lowerCamelCase : float | Vector ):
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ): # matrix-vector
if len(_lowerCamelCase ) == self.__width:
__lowerCamelCase : List[str] = zero_vector(self.__height )
for i in range(self.__height ):
__lowerCamelCase : Optional[int] = [
self.__matrix[i][j] * other.component(_lowerCamelCase )
for j in range(self.__width )
]
ans.change_component(_lowerCamelCase , sum(_lowerCamelCase ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(_lowerCamelCase , (int, float) ): # matrix-scalar
__lowerCamelCase : Union[str, Any] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_lowerCamelCase , self.__width , self.__height )
return None
def _snake_case ( self : Tuple ):
'''simple docstring'''
return self.__height
def _snake_case ( self : Any ):
'''simple docstring'''
return self.__width
def _snake_case ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def _snake_case ( self : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
__lowerCamelCase : Optional[Any] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def _snake_case ( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
__lowerCamelCase : Union[str, Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_lowerCamelCase ) ):
__lowerCamelCase : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_lowerCamelCase , self.__width - 1 , self.__height - 1 ).determinant()
def _snake_case ( self : Tuple , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_lowerCamelCase , _lowerCamelCase )
else:
raise Exception("""Indices out of bounds""" )
def _snake_case ( self : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__lowerCamelCase : int = [
self.__matrix[0][y] * self.cofactor(0 , _lowerCamelCase ) for y in range(self.__width )
]
return sum(_lowerCamelCase )
def _UpperCAmelCase ( UpperCAmelCase : int ):
"""simple docstring"""
__lowerCamelCase : list[list[float]] = [[0] * n for _ in range(UpperCAmelCase )]
return Matrix(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def _UpperCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
"""simple docstring"""
random.seed(UpperCAmelCase )
__lowerCamelCase : list[list[float]] = [
[random.randint(UpperCAmelCase , UpperCAmelCase ) for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )
]
return Matrix(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
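# A self-contained sketch of the Laplace (cofactor) expansion along the first
# row that the Matrix class above uses for its determinant, written against a
# plain list-of-lists. The function name is an illustrative assumption.
def determinant_sketch(m: list[list[float]]) -> float:
    if len(m) == 1:
        return m[0][0]
    if len(m) == 2:
        return m[0][0] * m[1][1] - m[0][1] * m[1][0]
    total = 0.0
    for y in range(len(m)):
        # minor: drop row 0 and column y, then recurse with alternating sign
        minor = [row[:y] + row[y + 1 :] for row in m[1:]]
        total += (-1) ** y * m[0][y] * determinant_sketch(minor)
    return total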
| 519
| 1
|
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    coeff_a = tf.cast(0.044715, x.dtype)
    coeff_b = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_b * (1.0 + coeff_a * x * x)))
def quick_gelu(x):
    '''simple docstring'''
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
    '''simple docstring'''
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    '''simple docstring'''
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    def approximate_gelu_wrap(x):
        '''simple docstring'''
        return tf.keras.activations.gelu(x, approximate=True)
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
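# A minimal usage sketch for the resolver above (assuming TensorFlow is
# installed): look an activation up by name and apply it to a tensor.
#
#     act = get_tf_activation("gelu")
#     y = act(tf.constant([-1.0, 0.0, 1.0]))
#
# Unknown names raise the KeyError above, which lists the valid keys.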
| 276
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    '''simple docstring'''
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
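# A worked example of the greedy packing above (sizes are illustrative): with a
# max_tokens budget that fits two sentences but not three, sources
# ["a", "b", "c"] pack to ["a b", "c"] -- each candidate concatenation is kept
# until is_too_big trips, at which point the accumulated pair is finalized and
# a new one is started from the current example.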
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    '''simple docstring'''
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def __UpperCamelCase ( ) -> Any:
'''simple docstring'''
_a = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=__lowerCamelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=__lowerCamelCase , default=128 )
parser.add_argument("--data_dir" , type=__lowerCamelCase )
parser.add_argument("--save_path" , type=__lowerCamelCase )
_a = parser.parse_args()
_a = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(__lowerCamelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
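# Example invocation (added; the model name and paths are placeholders):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed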
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
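# Illustrative usage (added sketch; the directory name is a placeholder):
#   config = RoCBertConfig(hidden_size=384, num_hidden_layers=6)
#   config.save_pretrained("./roc-bert-small")  # writes config.json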
def actual_power(a: int, b: int):
    """
    Compute a**b for b >= 0 by recursive squaring (O(log b) multiplications).
    """
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """
    Compute a**b for any integer exponent; negative exponents return a float.

    >>> power(4, 6)
    4096
    >>> power(-2, -3)
    -0.125
    """
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
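    # a few added sanity checks (illustrative)
    print(power(2, 10))  # 1024
    print(power(5, 0))  # 1
    print(power(2, -2))  # 0.25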
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Export a PyTorch BertModel's weights as a TensorFlow 1.x checkpoint.

    Variable names are mapped back to the original TF BERT naming scheme, and
    the weight matrices that TF stores transposed are transposed on the way out.
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
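# Example command line (added; the script name and paths are placeholders):
#   python convert_bert_pytorch_checkpoint_to_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert/pytorch_model.bin \
#       --tf_cache_dir ./bert-tf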
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
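# Sketch of how this trainer is typically wired up (added; all names below are
# placeholders for objects built elsewhere in a QA training script):
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model, args=training_args,
#       train_dataset=train_ds, eval_dataset=eval_ds,
#       eval_examples=raw_eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)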
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_A : Tuple = 16
_A : str = 32
def __magic_name__ ( __snake_case : Accelerator , __snake_case : int = 16 , __snake_case : str = "bert-base-cased" ) -> Union[str, Any]:
lowercase : Tuple = AutoTokenizer.from_pretrained(__snake_case )
lowercase : List[str] = load_dataset("glue" , "mrpc" )
def tokenize_function(__snake_case : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowercase : Optional[int] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase : Optional[Any] = datasets.map(
__snake_case , batched=__snake_case , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase : Optional[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__snake_case : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__snake_case , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__snake_case , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase : Union[str, Any] = DataLoader(
tokenized_datasets["train"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowercase : Union[str, Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
def __magic_name__ ( __snake_case : Union[str, Any] , __snake_case : str ) -> Union[str, Any]:
# Initialize accelerator
lowercase : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : int = config["lr"]
lowercase : int = int(config["num_epochs"] )
lowercase : int = int(config["seed"] )
lowercase : int = int(config["batch_size"] )
lowercase : Optional[Any] = args.model_name_or_path
set_seed(__snake_case )
lowercase , lowercase : str = get_dataloaders(__snake_case , __snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : List[Any] = AutoModelForSequenceClassification.from_pretrained(__snake_case , return_dict=__snake_case )
# Instantiate optimizer
lowercase : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=__snake_case )
if accelerator.state.deepspeed_plugin is not None:
lowercase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase : int = 1
lowercase : Dict = (len(__snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase : Tuple = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=0 , num_training_steps=__snake_case , )
else:
lowercase : str = DummyScheduler(__snake_case , total_num_steps=__snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase : Tuple = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# We need to keep track of how many total steps we have iterated over
lowercase : int = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase : Optional[int] = 0
# Now we train the model
lowercase : List[str] = evaluate.load("glue" , "mrpc" )
lowercase : Any = 0
lowercase : List[Any] = {}
for epoch in range(__snake_case , __snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
lowercase : Any = model(**__snake_case )
lowercase : Dict = outputs.loss
lowercase : Tuple = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase : Tuple = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase : List[Any] = model(**__snake_case )
lowercase : str = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase , lowercase : Any = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__snake_case ) - 1:
lowercase : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowercase : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __snake_case )
lowercase : Optional[int] = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase : Any = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(__snake_case , __snake_case )
def __magic_name__ ( ) -> Dict:
lowercase : List[Any] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__snake_case , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , )
parser.add_argument(
"--output_dir" , type=__snake_case , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=__snake_case , default=__snake_case , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=__snake_case , default=3 , help="Number of train epochs." , )
lowercase : List[str] = parser.parse_args()
lowercase : Optional[int] = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
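# Example launch (added; the accelerate config file is a placeholder and would
# typically enable the DeepSpeed plugin exercised above):
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --performance_lower_bound 0.80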
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
_A : Dict = """facebook/wmt19-en-de"""
_A : str = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
_A : List[Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
_A : int = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
_A : Dict = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
_A : List[Any] = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
_A : List[Any] = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
import os


def solution() -> int:
    """Find the greatest product of four adjacent numbers (in any direction) in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum


if __name__ == "__main__":
    print(solution())
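# Note (added): the "diagonal 2" pass scans anti-diagonals, i.e. windows of the
# form l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] with j >= 3.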
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?",\n        ...     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...     ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     normalized=True,\n        ...     case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     ignore_punct=True,\n        ...     case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...     "what about this sentence?",\n        ...     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...     ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...     ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...     references=references,\n        ...     ignore_punct=True,\n        ...     case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_lowerCamelCase = logging.get_logger(__name__)
@dataclass
class lowerCamelCase_ :
"""simple docstring"""
_lowerCAmelCase : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
_lowerCAmelCase : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
_lowerCAmelCase : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowerCAmelCase : bool = field(
default=lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCAmelCase__ ( self ):
SCREAMING_SNAKE_CASE__ = self.task_name.lower()
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = "train"
_lowerCAmelCase : Union[str, Any] = "dev"
_lowerCAmelCase : List[Any] = "test"
class lowerCamelCase_ ( lowercase ):
"""simple docstring"""
_lowerCAmelCase : GlueDataTrainingArguments
_lowerCAmelCase : str
_lowerCAmelCase : List[InputFeatures]
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = Split.train , UpperCAmelCase__ = None , ):
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , UpperCAmelCase__ , )
SCREAMING_SNAKE_CASE__ = args
SCREAMING_SNAKE_CASE__ = glue_processors[args.task_name]()
SCREAMING_SNAKE_CASE__ = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
SCREAMING_SNAKE_CASE__ = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
SCREAMING_SNAKE_CASE__ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ = cached_features_file + ".lock"
with FileLock(UpperCAmelCase__ ):
if os.path.exists(UpperCAmelCase__ ) and not args.overwrite_cache:
SCREAMING_SNAKE_CASE__ = time.time()
SCREAMING_SNAKE_CASE__ = torch.load(UpperCAmelCase__ )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
SCREAMING_SNAKE_CASE__ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
SCREAMING_SNAKE_CASE__ = self.processor.get_test_examples(args.data_dir )
else:
SCREAMING_SNAKE_CASE__ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
SCREAMING_SNAKE_CASE__ = examples[:limit_length]
SCREAMING_SNAKE_CASE__ = glue_convert_examples_to_features(
UpperCAmelCase__ , UpperCAmelCase__ , max_length=args.max_seq_length , label_list=UpperCAmelCase__ , output_mode=self.output_mode , )
SCREAMING_SNAKE_CASE__ = time.time()
torch.save(self.features , UpperCAmelCase__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
return len(self.features )
def __getitem__( self , UpperCAmelCase__ ):
return self.features[i]
def lowerCAmelCase__ ( self ):
return self.label_list
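# Illustrative usage (added sketch; `tokenizer` stands in for any pretrained
# tokenizer and the data dir for a local GLUE download):
#   data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="train")
#   print(len(dataset), dataset.get_labels())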
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = len(snake_case__ )
SCREAMING_SNAKE_CASE__ = len(snake_case__ )
SCREAMING_SNAKE_CASE__ = (
first_str_length if first_str_length > second_str_length else second_str_length
)
SCREAMING_SNAKE_CASE__ = []
for char_count in range(snake_case__ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(snake_case__ )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
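    # added illustrative calls
    print(alternative_string_arrange("ABCDE", "XY"))  # AXBYCDE
    print(alternative_string_arrange("", "XYZ"))  # XYZ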
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode `word` with the Baconian cipher (letters and spaces only)."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string made of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
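    # added round-trip demo (illustrative)
    secret = encode("hello")
    print(secret)  # AABBBAABAAABABAABABAABBAB
    print(decode(secret))  # hello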
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing (linear probing) and automatic resizing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
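if __name__ == "__main__":
    # small added demo (illustrative): insert, delete, lookup, and resize behavior
    hm: HashMap[str, int] = HashMap()
    for i, key in enumerate("abcdefghij"):
        hm[key] = i
    del hm["a"]
    print(len(hm), hm["b"])  # 9 1
    print(hm)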
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A : Optional[int] = logging.get_logger(__name__)
A : Union[str, Any] = {'''vocab_file''': '''spiece.model'''}
A : Any = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
A : Optional[int] = {
'''AI-Sweden/gpt-sw3-126m''': 2048,
'''AI-Sweden/gpt-sw3-350m''': 2048,
'''AI-Sweden/gpt-sw3-1.6b''': 2048,
'''AI-Sweden/gpt-sw3-6.7b''': 2048,
'''AI-Sweden/gpt-sw3-20b''': 2048,
}
class __lowerCamelCase ( a_ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a = ["input_ids", "attention_mask"]
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str]=False , SCREAMING_SNAKE_CASE : List[str]=False , SCREAMING_SNAKE_CASE : List[str]=False , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE : List[str] , ):
_A : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_A : List[str] = kwargs.get('name_or_path')
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored')
_A : Union[str, Any] = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_A : Any = '<|endoftext|>' if eos_token is None else eos_token
_A : str = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_A : Tuple = unk_token if pad_token is None else pad_token
_A : Union[str, Any] = eos_token if bos_token is None else bos_token
else:
_A : str = '<pad>' if pad_token is None else pad_token
_A : Optional[int] = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_A : Optional[Any] = do_lower_case
_A : Optional[Any] = remove_space
_A : int = keep_accents
_A : Union[str, Any] = vocab_file
_A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(SCREAMING_SNAKE_CASE)
# Used for whitespace normalization in input texts
# fmt : off
_A : str = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_A : Dict = re.compile(
F'[{"".join(map(SCREAMING_SNAKE_CASE , list(range(0 , 9)) + list(range(11 , 32)) + list(range(127 , 160)) + [160, 173, 8203]))}]')
def __getstate__( self : Optional[int]):
_A : Any = self.__dict__.copy()
_A : List[Any] = None
return state
def __setstate__( self : int , SCREAMING_SNAKE_CASE : int):
_A : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_A : List[str] = {}
_A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def A ( self : str):
return len(self.sp_model)
def A ( self : List[Any] , SCREAMING_SNAKE_CASE : str):
_A : Union[str, Any] = self.non_printing_characters_re.sub('' , SCREAMING_SNAKE_CASE)
# Normalize whitespaces
_A : str = ''.join([char if char not in self.whitespaces else ' ' for char in text])
# NFC Unicode normalization
_A : Tuple = unicodedata.normalize('NFC' , SCREAMING_SNAKE_CASE)
return text
def A ( self : int , SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Dict):
_A : Any = self.preprocess_text(SCREAMING_SNAKE_CASE)
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE)
def A ( self : Tuple , SCREAMING_SNAKE_CASE : str):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE)
def A ( self : List[str] , SCREAMING_SNAKE_CASE : int):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE)
@staticmethod
def A ( SCREAMING_SNAKE_CASE : str):
return out_string
def A ( self : Any , SCREAMING_SNAKE_CASE : List[str]):
_A : Optional[Any] = []
_A : List[str] = ''
_A : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE) + token
_A : str = True
_A : str = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE)
_A : Dict = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE)
return out_string
def A ( self : Union[str, Any]):
_A : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
    if not os.path.isdir(save_directory ):
        logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file , 'wb' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    return (out_vocab_file,)
def encode_fast( self , text : Union[str, List[str]] , return_tensors : Union[str, bool] = False ):
    if isinstance(text , str ):
        text = self.preprocess_text(text )
        token_ids = self.sp_model.encode(text )
    else:
        text = [self.preprocess_text(t ) for t in text]
        token_ids = self.sp_model.encode(text )
    if return_tensors is True or return_tensors == "pt":
        token_ids = torch.tensor(token_ids )
    return token_ids
def decode_fast( self , token_ids : Union[int, List[int]] ):
    return self.sp_model.decode(token_ids )
def A ( self : Tuple , SCREAMING_SNAKE_CASE : "Conversation"):
_A : int = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
_A : Any = (
F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(SCREAMING_SNAKE_CASE) + F'{self.bos_token}Bot:'
)
return self.encode(text=SCREAMING_SNAKE_CASE)
| 128
|
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase__ ( nums : list ):
    if not nums:
        raise ValueError('List is empty' )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128
| 1
|
def gray_code( bit_count : int ):
    """simple docstring"""
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string( bit_count : int ):
    """simple docstring"""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
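# A quick check of the reflect-and-prefix construction (sketch):
#   gray_code_sequence_string(2) == ["00", "01", "11", "10"]
#   gray_code(2) == [0, 1, 3, 2]
# The one-bit sequence ["0", "1"] is prefixed with "0", then reversed and
# prefixed with "1", so consecutive codes differ in exactly one bit.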
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = """trajectory_transformer"""
UpperCAmelCase_ = ["""past_key_values"""]
UpperCAmelCase_ = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , vocab_size=100 , action_weight=5 , reward_weight=1 , value_weight=1 , block_size=249 , action_dim=6 , observation_dim=17 , transition_dim=25 , n_layer=4 , n_head=4 , n_embd=128 , embd_pdrop=0.1 , attn_pdrop=0.1 , resid_pdrop=0.1 , learning_rate=0.00_06 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , kaiming_initializer_range=1 , use_cache=True , pad_token_id=1 , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ):
    self.vocab_size = vocab_size
    self.action_weight = action_weight
    self.reward_weight = reward_weight
    self.value_weight = value_weight
    self.max_position_embeddings = max_position_embeddings
    self.block_size = block_size
    self.action_dim = action_dim
    self.observation_dim = observation_dim
    self.transition_dim = transition_dim
    self.learning_rate = learning_rate
    self.n_layer = n_layer
    self.n_head = n_head
    self.n_embd = n_embd
    self.embd_pdrop = embd_pdrop
    self.attn_pdrop = attn_pdrop
    self.resid_pdrop = resid_pdrop
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.kaiming_initializer_range = kaiming_initializer_range
    self.use_cache = use_cache
    super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
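# Minimal usage sketch (class name assumed from the model type above, i.e.
# TrajectoryTransformerConfig): attribute_map aliases the generic names onto the
# GPT-style ones, so e.g.
#   config = TrajectoryTransformerConfig(n_embd=256)
#   assert config.hidden_size == config.n_embd == 256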
| 364
| 1
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=3 , lowerCAmelCase_=32 , lowerCAmelCase_=3 , lowerCAmelCase_=10 , lowerCAmelCase_=[10, 20, 30, 40] , lowerCAmelCase_=[1, 1, 2, 1] , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=3 , lowerCAmelCase_=None , ) -> List[str]:
_A = parent
_A = batch_size
_A = image_size
_A = num_channels
_A = embeddings_size
_A = hidden_sizes
_A = depths
_A = is_training
_A = use_labels
_A = hidden_act
_A = num_labels
_A = scope
_A = len(__UpperCAmelCase )
def UpperCAmelCase ( self ) -> str:
_A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A = self.get_config()
return config, pixel_values
def UpperCAmelCase ( self ) -> Tuple:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_A = FlaxRegNetModel(config=__UpperCAmelCase )
_A = model(__UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = self.num_labels
_A = FlaxRegNetForImageClassification(config=__UpperCAmelCase )
_A = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self ) -> Any:
_A = self.prepare_config_and_inputs()
_A , _A = config_and_inputs
_A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class a ( A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :str = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowerCamelCase :Optional[int] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :Tuple = False
def UpperCAmelCase ( self ) -> None:
_A = FlaxRegNetModelTester(self )
_A = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def UpperCAmelCase ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self ) -> Dict:
return
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase ( self ) -> List[str]:
pass
def UpperCAmelCase ( self ) -> Optional[Any]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(__UpperCAmelCase )
_A = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def UpperCAmelCase ( self ) -> Union[str, Any]:
def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_A = model_class(__UpperCAmelCase )
_A = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ) -> List[str]:
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
_A = model_class(__UpperCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase_ , **lowerCAmelCase_ ):
return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase )
with self.subTest("""JIT Enabled""" ):
_A = model_jitted(**__UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_A = model_jitted(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
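# Equivalence sketch for the JIT test above (fn and x are hypothetical):
#   fast = jax.jit(fn)
#   with jax.disable_jit():
#       eager_out = fn(x)
#   assert fast(x).shape == eager_out.shape
# jax.jit should change compilation, not the structure of the outputs, which is
# why the test compares the jitted and eager tuples element by element.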
def snake_case ( ) -> Optional[int]:
_A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Dict:
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def UpperCAmelCase ( self ) -> int:
_A = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=__UpperCAmelCase , return_tensors="""np""" )
_A = model(**__UpperCAmelCase )
# verify the logits
_A = (1, 10_00)
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_A = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 401
|
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark( key ):
    '''simple docstring'''
    def decorator( func ):
        handle = getattr(func , 'handle_key' , [] )
        handle += [key]
        setattr(func , 'handle_key' , handle )
        return func
    return decorator
def mark_multiple( *keys ):
    '''simple docstring'''
    def decorator( func ):
        handle = getattr(func , 'handle_key' , [] )
        handle += keys
        setattr(func , 'handle_key' , handle )
        return func
    return decorator
class KeyHandler(type ):
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , 'key_handler' ):
            setattr(new_cls , 'key_handler' , {} )
        setattr(new_cls , 'handle_input' , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , 'handle_key' , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
@staticmethod
def handle_input( cls ):
    char = get_character()
    if char != KEYMAP['undefined']:
        char = ord(char )
    handler = cls.key_handler.get(char )
    if handler:
        cls.current_selection = char
        return handler(cls )
    else:
        return None
def A ( cls ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
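# Registration sketch (the class body below is hypothetical): a method decorated
# with @mark or @mark_multiple is collected by the KeyHandler metaclass into
# ``key_handler`` and dispatched by ``handle_input`` when its key is pressed.
#   class Menu(metaclass=KeyHandler):
#       @mark("q")                 # single key
#       def quit(self): ...
#       @mark_multiple("j", "k")   # several keys, one handler
#       def move(self): ...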
| 196
| 0
|
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection : list , n : int ):
    """simple docstring"""
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next(collection : list , index : int ):
    """simple docstring"""
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
if __name__ == "__main__":
A_ = input('''Enter integers separated by spaces: ''')
A_ = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
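# Trace sketch: rec_insertion_sort([3, 1, 2], 3) first calls insert_next at
# index 2 (1 <= 2, no swap), recurses to n=2, where insert_next swaps 3 and 1
# and then carries 3 right past 2, sorting the list in place to [1, 2, 3].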
| 713
|
"""simple docstring"""
def UpperCAmelCase__ (number : int ):
    """simple docstring"""
    if not isinstance(number , int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
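# Kernighan's trick, traced (sketch): for number = 0b1011,
#   0b1011 & 0b1010 -> 0b1010,  0b1010 & 0b1001 -> 0b1000,  0b1000 & 0b0111 -> 0,
# so the loop runs exactly three times, once per set bit.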
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
| 0
|
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint( ABC ):
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
self.test()
def A ( self : int ):
'''simple docstring'''
_snake_case = 0
_snake_case = False
while not completed:
if counter == 1:
self.reset()
_snake_case = self.advance()
if not self.does_advance(a_ ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
_snake_case = self.update(a_ )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def A ( self : Optional[int] ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def A ( self : List[str] , lowercase : int ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def A ( self : List[str] , lowercase : int ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def A ( self : int ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def A ( self : Optional[Any] ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def A ( self : Optional[Any] , lowercase : int=False ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint( Constraint ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase : List[int] ):
'''simple docstring'''
super(a_ , self ).__init__()
if not isinstance(a_ , a_ ) or len(a_ ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(a_ , a_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
_snake_case = token_ids
_snake_case = len(self.token_ids )
_snake_case = -1 # the index of the currently fulfilled step
_snake_case = False
def A ( self : Optional[Any] ):
'''simple docstring'''
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def A ( self : int , lowercase : int ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def A ( self : Any , lowercase : int ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}''' )
_snake_case = False
_snake_case = False
_snake_case = False
if self.does_advance(a_ ):
self.fulfilled_idx += 1
_snake_case = True
if self.fulfilled_idx == (self.seqlen - 1):
_snake_case = True
_snake_case = completed
else:
# failed to make progress.
_snake_case = True
self.reset()
return stepped, completed, reset
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = False
_snake_case = 0
def A ( self : int ):
'''simple docstring'''
return self.seqlen - (self.fulfilled_idx + 1)
def A ( self : List[str] , lowercase : Optional[int]=False ):
'''simple docstring'''
_snake_case = PhrasalConstraint(self.token_ids )
if stateful:
_snake_case = self.seqlen
_snake_case = self.fulfilled_idx
_snake_case = self.completed
return new_constraint
class DisjunctiveTrie:
'''simple docstring'''
def __init__( self , nested_token_ids : List[List[int]] , no_subsets : bool = True ):
    '''simple docstring'''
    self.max_height = max([len(one ) for one in nested_token_ids] )
    root = {}
    for token_ids in nested_token_ids:
        level = root
        for tidx, token_id in enumerate(token_ids ):
            if token_id not in level:
                level[token_id] = {}
            level = level[token_id]
    if no_subsets and self.has_subsets(root , nested_token_ids ):
        raise ValueError(
            'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
            f' {nested_token_ids}.' )
    self.trie = root
def next_tokens( self , current_seq ):
    '''simple docstring'''
    start = self.trie
    for current_token in current_seq:
        start = start[current_token]
    next_tokens = list(start.keys() )
    return next_tokens
def reached_leaf( self , current_seq ):
    '''simple docstring'''
    next_tokens = self.next_tokens(current_seq )
    return len(next_tokens ) == 0
def count_leaves( self , root ):
    '''simple docstring'''
    next_nodes = list(root.values() )
    if len(next_nodes ) == 0:
        return 1
    else:
        return sum([self.count_leaves(nn ) for nn in next_nodes] )
def has_subsets( self , trie , nested_token_ids ):
    '''simple docstring'''
    leaf_count = self.count_leaves(trie )
    return len(nested_token_ids ) != leaf_count
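# Structure sketch: for nested_token_ids [[1, 2, 3], [1, 2, 4]] the trie built
# above is the nested dict {1: {2: {3: {}, 4: {}}}}; next_tokens([1, 2]) walks it
# and returns [3, 4], and count_leaves finds the two empty leaf dicts.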
class DisjunctiveConstraint( Constraint ):
'''simple docstring'''
def __init__( self : List[str] , lowercase : List[List[int]] ):
'''simple docstring'''
super(a_ , self ).__init__()
if not isinstance(a_ , a_ ) or len(a_ ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(a_ , a_ ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(a_ , a_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
_snake_case = DisjunctiveTrie(a_ )
_snake_case = nested_token_ids
_snake_case = self.trie.max_height
_snake_case = []
_snake_case = False
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.trie.next_tokens(self.current_seq )
if len(a_ ) == 0:
return None
else:
return token_list
def A ( self : Dict , lowercase : int ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}''' )
_snake_case = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def A ( self : List[str] , lowercase : int ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}''' )
_snake_case = False
_snake_case = False
_snake_case = False
if self.does_advance(a_ ):
self.current_seq.append(a_ )
_snake_case = True
else:
_snake_case = True
self.reset()
_snake_case = self.trie.reached_leaf(self.current_seq )
_snake_case = completed
return stepped, completed, reset
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = False
_snake_case = []
def A ( self : int ):
'''simple docstring'''
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def A ( self : List[str] , lowercase : str=False ):
'''simple docstring'''
_snake_case = DisjunctiveConstraint(self.token_ids )
if stateful:
_snake_case = self.seqlen
_snake_case = self.current_seq
_snake_case = self.completed
return new_constraint
class ConstraintListState:
'''simple docstring'''
def __init__( self : Tuple , lowercase : List[Constraint] ):
'''simple docstring'''
_snake_case = constraints
# max # of steps required to fulfill a given constraint
_snake_case = max([c.seqlen for c in constraints] )
_snake_case = len(a_ )
_snake_case = False
self.init_state()
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = []
_snake_case = None
_snake_case = [constraint.copy(stateful=a_ ) for constraint in self.constraints]
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def A ( self : str ):
'''simple docstring'''
_snake_case = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_snake_case = constraint.advance()
if isinstance(a_ , a_ ):
token_list.append(a_ )
elif isinstance(a_ , a_ ):
token_list.extend(a_ )
else:
_snake_case = self.inprogress_constraint.advance()
if isinstance(a_ , a_ ):
token_list.append(a_ )
elif isinstance(a_ , a_ ):
token_list.extend(a_ )
if len(a_ ) == 0:
return None
else:
return token_list
def A ( self : List[Any] , lowercase : Optional[List[int]] ):
'''simple docstring'''
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_snake_case = self.add(a_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def A ( self : Optional[int] , lowercase : int ):
'''simple docstring'''
if not isinstance(a_ , a_ ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
_snake_case = False, False
if self.completed:
_snake_case = True
_snake_case = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state
_snake_case = self.inprogress_constraint.update(a_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=a_ ) )
_snake_case = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_snake_case = None
if len(self.pending_constraints ) == 0:
# we're done!
_snake_case = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(a_ ):
_snake_case = pending_constraint.update(a_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(a_ )
_snake_case = None
if not complete and stepped:
_snake_case = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_snake_case = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_snake_case = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def A ( self : Optional[int] , lowercase : Optional[Any]=True ):
'''simple docstring'''
new_state = ConstraintListState(self.constraints )  # we never actually touch the self.constraints objects
# throughout this process, so they are still in their initialization state.
if stateful:
_snake_case = [
constraint.copy(stateful=a_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_snake_case = self.inprogress_constraint.copy(stateful=a_ )
_snake_case = [constraint.copy() for constraint in self.pending_constraints]
return new_state
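# Usage sketch (the token ids are hypothetical): stepping a single phrasal
# constraint token by token until it reports completion.
#   c = PhrasalConstraint([5, 9, 2])
#   c.update(5)  # stepped=True,  fulfilled_idx -> 0
#   c.update(9)  # stepped=True,  fulfilled_idx -> 1
#   c.update(2)  # completed=True: the whole phrase has been generated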
| 686
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
__lowerCamelCase : List[Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def UpperCAmelCase ( self : str , a_ : Optional[Any]=0 ) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = np.random.RandomState(a_ )
a__ : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a_ )
a__ : Optional[int] = self.get_dummy_inputs()
a__ : Union[str, Any] = pipe(**a_ ).images
a__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : List[str] = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a_ )
pipe.set_progress_bar_config(disable=a_ )
a__ : Optional[int] = self.get_dummy_inputs()
a__ : List[Any] = pipe(**a_ ).images
a__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : List[str] = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Tuple ) -> Tuple:
'''simple docstring'''
a__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
a__ : List[Any] = self.get_dummy_inputs()
a__ : Optional[Any] = pipe(**a_ ).images
a__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : Dict = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
a__ : Optional[int] = self.get_dummy_inputs()
a__ : int = pipe(**a_ ).images
a__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : str = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
a__ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : Dict = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
a__ : Any = self.get_dummy_inputs()
a__ : List[str] = pipe(**a_ ).images
a__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : Any = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
a__ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
a__ : Tuple = self.get_dummy_inputs()
a__ : List[str] = pipe(**a_ ).images
a__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : Any = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
a__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a_ )
a__ : Any = self.get_dummy_inputs()
a__ : Any = 3 * [inputs["prompt"]]
# forward
a__ : Union[str, Any] = pipe(**a_ )
a__ : int = output.images[0, -3:, -3:, -1]
a__ : Union[str, Any] = self.get_dummy_inputs()
a__ : List[Any] = 3 * [inputs.pop("prompt" )]
a__ : Optional[Any] = pipe.tokenizer(
a_ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=a_ , return_tensors="np" , )
a__ : List[str] = text_inputs["input_ids"]
a__ : int = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
a__ : List[Any] = prompt_embeds
# forward
a__ : List[Any] = pipe(**a_ )
a__ : List[str] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a_ )
a__ : Tuple = self.get_dummy_inputs()
a__ : Dict = 3 * ["this is a negative prompt"]
a__ : Optional[Any] = negative_prompt
a__ : Any = 3 * [inputs["prompt"]]
# forward
a__ : str = pipe(**a_ )
a__ : List[str] = output.images[0, -3:, -3:, -1]
a__ : Union[str, Any] = self.get_dummy_inputs()
a__ : Union[str, Any] = 3 * [inputs.pop("prompt" )]
a__ : List[Any] = []
for p in [prompt, negative_prompt]:
a__ : int = pipe.tokenizer(
a_ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=a_ , return_tensors="np" , )
a__ : Any = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
a__ , a__ : Union[str, Any] = embeds
# forward
a__ : Dict = pipe(**a_ )
a__ : Any = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
@property
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
a__ : List[str] = ort.SessionOptions()
a__ : List[str] = False
return options
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
a__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=a_ )
a__ : Optional[int] = "A painting of a squirrel eating a burger"
np.random.seed(0 )
a__ : Dict = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" )
a__ : Any = output.images
a__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
a__ : str = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Any = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
a__ : str = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=a_ )
a__ : str = "open neural network exchange"
a__ : Tuple = np.random.RandomState(0 )
a__ : Dict = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=a_ , output_type="np" )
a__ : Dict = output.images
a__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
a__ : Dict = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
a__ : List[Any] = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
a__ : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=a_ )
a__ : Any = "open neural network exchange"
a__ : Optional[Any] = np.random.RandomState(0 )
a__ : Optional[int] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=a_ , output_type="np" )
a__ : int = output.images
a__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
a__ : Dict = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase ( self : int ) -> Optional[Any]:
'''simple docstring'''
a__ : List[str] = 0
def test_callback_fn(a_ : int , a_ : int , a_ : np.ndarray ) -> None:
a__ : Optional[int] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
a__ : Any = latents[0, -3:, -3:, -1]
a__ : Union[str, Any] = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
a__ : Union[str, Any] = latents[0, -3:, -3:, -1]
a__ : Optional[int] = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
a__ : Tuple = False
a__ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a_ )
a__ : List[Any] = "Andromeda galaxy in a bottle"
a__ : str = np.random.RandomState(0 )
pipe(
prompt=a_ , num_inference_steps=5 , guidance_scale=7.5 , generator=a_ , callback=a_ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
a__ : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(a_ , a_ )
assert pipe.safety_checker is None
a__ : Tuple = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
a__ : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
a__ : Dict = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
| 642
| 0
|
UpperCamelCase__ : Optional[int] = 9.8_06_65
def _UpperCAmelCase ( fluid_density : float , volume : float , gravity : float = g ):
    """simple docstring"""
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density' )
    if volume < 0:
        raise ValueError('Impossible Object volume' )
    if gravity <= 0:
        raise ValueError('Impossible Gravity' )
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
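# Worked example (sketch): a fully submerged 0.002 m^3 object in fresh water
# (density ~1000 kg/m^3) experiences a buoyant force of about
#   1000 * 9.80665 * 0.002 ~= 19.6 N,
# i.e. _UpperCAmelCase(1000, 0.002) with the default gravity defined above.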
| 620
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class __snake_case ( unittest.TestCase ):
__lowerCAmelCase : Dict = inspect.getfile(accelerate.test_utils )
__lowerCAmelCase : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
__lowerCAmelCase : Tuple = ['accelerate', 'launch']
__lowerCAmelCase : Union[str, Any] = Path.home() / '.cache/huggingface/accelerate'
__lowerCAmelCase : List[str] = 'default_config.yaml'
__lowerCAmelCase : List[Any] = config_folder / config_file
__lowerCAmelCase : str = config_folder / '_default_config.yaml'
__lowerCAmelCase : Optional[int] = Path('tests/test_configs' )
@classmethod
def lowerCAmelCase__ ( cls):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path)
@classmethod
def lowerCAmelCase__ ( cls):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path)
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())
def lowerCAmelCase__ ( self):
for config in sorted(self.test_config_path.glob('**/*.yaml')):
with self.subTest(config_file=_A):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_A), self.test_file_path] , env=os.environ.copy())
def lowerCAmelCase__ ( self):
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())
class __snake_case ( unittest.TestCase ):
__lowerCAmelCase : Optional[Any] = 'test-tpu'
__lowerCAmelCase : str = 'us-central1-a'
__lowerCAmelCase : Union[str, Any] = 'ls'
__lowerCAmelCase : Union[str, Any] = ['accelerate', 'tpu-config']
__lowerCAmelCase : Union[str, Any] = 'cd /usr/share'
__lowerCAmelCase : List[Any] = 'tests/test_samples/test_command_file.sh'
__lowerCAmelCase : Dict = 'Running gcloud compute tpus tpu-vm ssh'
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_A , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_A , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_A)
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_A , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _A , )
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=_A , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _A , )
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_A , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_A , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_A , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=_A , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _A , )
| 620
| 1
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : Optional[int] = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Union[str, Any] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 143
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["YolosFeatureExtractor"]
_snake_case = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 413
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class UpperCamelCase_ ( A ):
'''simple docstring'''
a :List[Any] = 'codegen'
a :Optional[int] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_ctx=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
    self.vocab_size = vocab_size
    self.n_ctx = n_ctx
    self.n_positions = n_positions
    self.n_embd = n_embd
    self.n_layer = n_layer
    self.n_head = n_head
    self.n_inner = n_inner
    self.rotary_dim = rotary_dim
    self.activation_function = activation_function
    self.resid_pdrop = resid_pdrop
    self.embd_pdrop = embd_pdrop
    self.attn_pdrop = attn_pdrop
    self.layer_norm_epsilon = layer_norm_epsilon
    self.initializer_range = initializer_range
    self.use_cache = use_cache
    self.bos_token_id = bos_token_id
    self.eos_token_id = eos_token_id
    super().__init__(
        bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs)
class UpperCamelCase_ ( A ):
'''simple docstring'''
def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
    super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past)
    if not getattr(self._config , 'pad_token_id' , None):
        # TODO: how to do that better?
        self._config.pad_token_id = 0
@property
def inputs( self ):
    common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
    if self.use_past:
        self.fill_with_past_key_values_(common_inputs , direction='inputs')
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
    else:
        common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
    return common_inputs
@property
def num_layers( self ):
    return self._config.n_layer
@property
def num_attention_heads( self ):
    return self._config.n_head
def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
    common_inputs = super().generate_dummy_inputs(
        tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework)
    # We need to order the inputs in the way they appear in the forward()
    ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
    # Need to add the past_keys
    if self.use_past:
        if not is_torch_available():
            raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
        else:
            import torch
            batch , seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            past_shape = (
                batch,
                self.num_attention_heads,
                past_key_values_length,
                self._config.hidden_size // self.num_attention_heads,
            )
            ordered_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
            ]
    ordered_inputs['attention_mask'] = common_inputs['attention_mask']
    if self.use_past:
        mask_dtype = ordered_inputs['attention_mask'].dtype
        ordered_inputs['attention_mask'] = torch.cat(
            [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype)] , dim=1)
    return ordered_inputs
@property
def lowercase__ ( self):
return 13
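# Shape sketch for the dummy past_key_values built in generate_dummy_inputs above:
# each of the num_layers entries is a (key, value) pair of zero tensors of shape
#   (batch, num_attention_heads, seqlen + 2, hidden_size // num_attention_heads)
# e.g. batch=2, seqlen=8 with the default config gives (2, 16, 10, 256) per tensor.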
| 413
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
def lowercase__ ( A_: int , A_: Optional[Any] ) -> int:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name,
            max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,
        compute_metrics=compute_metrics, data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 68
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1_536,
'junnyu/roformer_chinese_base': 1_536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        # Make sure the custom PreTokenizer is set already in the __init__
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
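

# Usage sketch (added for illustration; the checkpoint name comes from the map
# above, and the Jieba-based pre-tokenizer needs the `rjieba` package at runtime):
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   print(tokenizer.tokenize("今天天气非常好。"))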
| 357
| 0
|
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 602
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 602
| 1
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 30
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0_2_1},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0_0_5_3},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_80, 6_40)}, "scores": 0.9_9_6_7},
{"mask": {"hash": "453c7844bd", "shape": (4_80, 6_40)}, "scores": 0.9_9_3},
{"mask": {"hash": "3d44f2926d", "shape": (4_80, 6_40)}, "scores": 0.9_9_0_9},
{"mask": {"hash": "64033ddc3f", "shape": (4_80, 6_40)}, "scores": 0.9_8_7_9},
{"mask": {"hash": "801064ff79", "shape": (4_80, 6_40)}, "scores": 0.9_8_3_4},
{"mask": {"hash": "6172f276ef", "shape": (4_80, 6_40)}, "scores": 0.9_7_1_6},
{"mask": {"hash": "b49e60e084", "shape": (4_80, 6_40)}, "scores": 0.9_6_1_2},
{"mask": {"hash": "a811e775fd", "shape": (4_80, 6_40)}, "scores": 0.9_5_9_9},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_80, 6_40)}, "scores": 0.9_5_5_2},
{"mask": {"hash": "9d8257e080", "shape": (4_80, 6_40)}, "scores": 0.9_5_3_2},
{"mask": {"hash": "32de6454a8", "shape": (4_80, 6_40)}, "scores": 0.9_5_1_6},
{"mask": {"hash": "af3d4af2c8", "shape": (4_80, 6_40)}, "scores": 0.9_4_9_9},
{"mask": {"hash": "3c6db475fb", "shape": (4_80, 6_40)}, "scores": 0.9_4_8_3},
{"mask": {"hash": "c290813fb9", "shape": (4_80, 6_40)}, "scores": 0.9_4_6_4},
{"mask": {"hash": "b6f0b8f606", "shape": (4_80, 6_40)}, "scores": 0.9_4_3},
{"mask": {"hash": "92ce16bfdf", "shape": (4_80, 6_40)}, "scores": 0.9_4_3},
{"mask": {"hash": "c749b25868", "shape": (4_80, 6_40)}, "scores": 0.9_4_0_8},
{"mask": {"hash": "efb6cab859", "shape": (4_80, 6_40)}, "scores": 0.9_3_3_5},
{"mask": {"hash": "1ff2eafb30", "shape": (4_80, 6_40)}, "scores": 0.9_3_2_6},
{"mask": {"hash": "788b798e24", "shape": (4_80, 6_40)}, "scores": 0.9_2_6_2},
{"mask": {"hash": "abea804f0e", "shape": (4_80, 6_40)}, "scores": 0.8_9_9_9},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_80, 6_40)}, "scores": 0.8_9_8_6},
{"mask": {"hash": "cd24047c8a", "shape": (4_80, 6_40)}, "scores": 0.8_9_8_4},
{"mask": {"hash": "6943e6bcbd", "shape": (4_80, 6_40)}, "scores": 0.8_8_7_3},
{"mask": {"hash": "b5f47c9191", "shape": (4_80, 6_40)}, "scores": 0.8_8_7_1}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0_2_1_0},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0_0_5_3},
] , )
| 595
| 0
|
def solution() -> int:
    """Project Euler problem 19: count the Sundays that fell on the first of the
    month during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
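
    # Cross-check (an illustrative addition, not part of the original solution):
    # the standard library can count the same Sundays directly, which validates
    # the hand-rolled calendar walk above (both give 171).
    import datetime

    assert solution() == sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if datetime.date(year, month, 1).weekday() == 6  # Sunday
    )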
| 673
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 673
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
    'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class MarkupLMConfig(PretrainedConfig):
    model_type = 'markuplm'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 106
|
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f'''{file}_{class_name}_{test_name}'''
    done_test[_id] += 1

    with open(file, 'r') as f:
        lines = f.readlines()

    class_regex = f'''class {class_name}('''
    test_regex = f'''{4 * " "}def {test_name}('''
    line_begin_regex = f'''{8 * " "}{correct_line.split()[0]}'''
    another_line_begin_regex = f'''{16 * " "}{correct_line.split()[0]}'''
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f'''{spaces * " "}{correct_line}''')
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, 'w') as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, 'r') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, 'r') as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 106
| 1
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES['''spm_file'''])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '''</s>'''
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], '''</s>''')
        self.assertEqual(vocab_keys[1], '''<unk>''')
        self.assertEqual(vocab_keys[-1], '''<s>''')
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
    @unittest.skip('''Skip this test while all models are still to be uploaded.''')
    def test_pretrained_model_lists(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, '''This is a test''')
@slow
    def test_tokenizer_integration(self):
        # fmt: off
snake_case__ : int ={'''input_ids''': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case__, model_name='''facebook/m2m100_418M''', revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''', )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = '''facebook/m2m100_418M'''
    src_text = [
        '''In my opinion, there are two levels of response from the French government.''',
        '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
    ]
    tgt_text = [
        '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
        '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='''en''', tgt_lang='''fr''')
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 12_8006 )
self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 12_8022 )
self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 12_8076 )
self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 12_8063 )
    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab['''<unk>'''], 3)
        self.assertIn(self.tokenizer.get_lang_token('''en'''), vocab)
    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = '''en'''
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
@require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = '''en'''
        self.tokenizer.tgt_lang = '''fr'''

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors='''pt''')
        batch["decoder_input_ids"] = shift_tokens_right(
            batch['''labels'''], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)

        for k in batch:
            batch[k] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = '''mr'''
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''mr''')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = '''zh'''
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''zh''')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
@require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = '''mr'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''mr''')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = '''zh'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id('''zh''')])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs('''A test''', return_tensors='''pt''', src_lang='''en''', tgt_lang='''ar''')

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                '''input_ids''': [[12_8022, 58, 4183, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 12_8006,
            }, )
| 408
|
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve the system a1*x + b1*y = c1, a2*x + b2*y = c2 with Cramer's rule."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('''Please enter a valid equation.''')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''')

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''')
        else:
            raise ValueError('''No solution. (Inconsistent system)''')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
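

# Quick usage sketch (added for illustration): solve x + 2y = 3 and 2x + y = 3,
# whose unique solution is (1, 1).
if __name__ == "__main__":
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)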
| 408
| 1
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by up-casing it and separating repeated letters with X's."""
    dirty = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''''''

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''''''

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ''''''

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
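

# Round-trip sketch (added for illustration, not part of the original module):
# decoding an encoded message with the same key recovers the prepared plaintext,
# i.e. the upper-cased input with the padding X appended by `prepare_input`.
if __name__ == "__main__":
    key = "monarchy"
    ciphertext = encode("hide the gold", key)
    print(ciphertext)
    print(decode(ciphertext, key))  # HIDETHEGOLDX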
| 696
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 5_1_2}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 696
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )
    )


def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 331
|
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}")

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.')[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ''

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowercase =parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
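# Hypothetical invocation of the script above (paths are placeholders, not
# real checkpoints; substitute your own locations):
#
#     python convert_unispeech_sat_checkpoint.py \
#         --checkpoint_path /path/to/unispeech_sat.pt \
#         --pytorch_dump_folder_path ./unispeech-sat-hf \
#         --config_path ./config.json
#
# Equivalently, the entry point referenced in the __main__ block can be
# called directly with the same positional arguments:
#
#     convert_unispeech_sat_checkpoint(
#         '/path/to/unispeech_sat.pt', './unispeech-sat-hf', None, None, True
#     )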
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
UpperCamelCase__ = None
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
UpperCamelCase__ = {
'camembert-base': 512,
}
UpperCamelCase__ = '▁'
class UpperCAmelCase__ ( A_ ):
'''simple docstring'''
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase_ = CamembertTokenizer
def __init__( self : str , UpperCamelCase : Tuple=None , UpperCamelCase : Tuple=None , UpperCamelCase : str="<s>" , UpperCamelCase : Optional[Any]="</s>" , UpperCamelCase : List[str]="</s>" , UpperCamelCase : int="<s>" , UpperCamelCase : Any="<unk>" , UpperCamelCase : str="<pad>" , UpperCamelCase : Union[str, Any]="<mask>" , UpperCamelCase : Tuple=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCamelCase : List[Any] , ):
"""simple docstring"""
_lowercase : List[str] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
super().__init__(
UpperCamelCase , tokenizer_file=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , )
_lowercase : Optional[Any] = vocab_file
_lowercase : Optional[Any] = False if not self.vocab_file else True
def lowerCAmelCase_ ( self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowercase : Any = [self.cls_token_id]
_lowercase : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : str , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
_lowercase : Tuple = [self.sep_token_id]
_lowercase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self : str , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_lowercase : List[Any] = os.path.join(
UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file , UpperCamelCase )
return (out_vocab_file,)
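# Usage sketch (assumes the public camembert-base checkpoint is reachable):
# single sequences take the layout `<s> A </s>` and pairs take
# `<s> A </s></s> B </s>`, with token type ids all zero, matching the two
# methods above.
from transformers import CamembertTokenizerFast

_tok = CamembertTokenizerFast.from_pretrained('camembert-base')
_ids = _tok.build_inputs_with_special_tokens([10, 11], [20, 21])
assert _ids[0] == _tok.cls_token_id and _ids.count(_tok.sep_token_id) == 3
assert _tok.create_token_type_ids_from_sequences([10, 11], [20, 21]) == [0] * len(_ids)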
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase__ ( A_ , A_ ):
'''simple docstring'''
@register_to_config
def __init__( self : Tuple , UpperCamelCase : bool , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None ):
"""simple docstring"""
super().__init__()
_lowercase : str = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_lowercase : Optional[Any] = torch.zeros(UpperCamelCase , UpperCamelCase )
else:
_lowercase : str = None
_lowercase : List[Any] = torch.nn.Parameter(UpperCamelCase )
class UpperCAmelCase__ ( A_ ):
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
def __init__( self : List[Any] , UpperCamelCase : VQModel , UpperCamelCase : CLIPTextModel , UpperCamelCase : CLIPTokenizer , UpperCamelCase : TransformeraDModel , UpperCamelCase : VQDiffusionScheduler , UpperCamelCase : LearnedClassifierFreeSamplingEmbeddings , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=UpperCamelCase , transformer=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , scheduler=UpperCamelCase , learned_classifier_free_sampling_embeddings=UpperCamelCase , )
def lowerCAmelCase_ ( self : str , UpperCamelCase : Dict , UpperCamelCase : List[Any] , UpperCamelCase : Dict ):
"""simple docstring"""
_lowercase : Dict = len(UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else 1
# get prompt text embeddings
_lowercase : str = self.tokenizer(
UpperCamelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
_lowercase : str = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowercase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
_lowercase : Any = text_input_ids[:, : self.tokenizer.model_max_length]
_lowercase : int = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_lowercase : List[Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase )
# duplicate text embeddings for each generation per prompt
_lowercase : Optional[int] = prompt_embeds.repeat_interleave(UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_lowercase : Tuple = self.learned_classifier_free_sampling_embeddings.embeddings
_lowercase : Optional[int] = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase , 1 , 1 )
else:
_lowercase : Any = [''''''] * batch_size
_lowercase : Tuple = text_input_ids.shape[-1]
_lowercase : Any = self.tokenizer(
UpperCamelCase , padding='''max_length''' , max_length=UpperCamelCase , truncation=UpperCamelCase , return_tensors='''pt''' , )
_lowercase : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_lowercase : Optional[Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowercase : int = negative_prompt_embeds.shape[1]
_lowercase : int = negative_prompt_embeds.repeat(1 , UpperCamelCase , 1 )
_lowercase : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase : int = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Tuple , UpperCamelCase : Union[str, List[str]] , UpperCamelCase : int = 1_00 , UpperCamelCase : float = 5.0 , UpperCamelCase : float = 1.0 , UpperCamelCase : int = 1 , UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase : int = 1 , ):
"""simple docstring"""
if isinstance(UpperCamelCase , UpperCamelCase ):
_lowercase : Any = 1
elif isinstance(UpperCamelCase , UpperCamelCase ):
_lowercase : int = len(UpperCamelCase )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase )}' )
_lowercase : Any = batch_size * num_images_per_prompt
_lowercase : Optional[int] = guidance_scale > 1.0
_lowercase : Any = self._encode_prompt(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase , UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(UpperCamelCase )}.' )
# get the initial completely masked latents unless the user supplied it
_lowercase : Optional[Any] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_lowercase : Optional[int] = self.transformer.num_vector_embeds - 1
_lowercase : Union[str, Any] = torch.full(UpperCamelCase , UpperCamelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                '''Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,'''
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
_lowercase : List[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCamelCase , device=self.device )
_lowercase : Union[str, Any] = self.scheduler.timesteps.to(self.device )
_lowercase : Optional[int] = latents
for i, t in enumerate(self.progress_bar(UpperCamelCase ) ):
# expand the sample if we are doing classifier free guidance
_lowercase : Tuple = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_lowercase : Union[str, Any] = self.transformer(UpperCamelCase , encoder_hidden_states=UpperCamelCase , timestep=UpperCamelCase ).sample
if do_classifier_free_guidance:
_lowercase , _lowercase : Dict = model_output.chunk(2 )
_lowercase : Union[str, Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(UpperCamelCase , dim=1 , keepdim=UpperCamelCase )
_lowercase : Dict = self.truncate(UpperCamelCase , UpperCamelCase )
# remove `log(0)`'s (`-inf`s)
_lowercase : int = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(UpperCamelCase , timestep=UpperCamelCase , sample=UpperCamelCase , generator=UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase , UpperCamelCase , UpperCamelCase )
_lowercase : Optional[int] = self.vqvae.config.vq_embed_dim
_lowercase : List[str] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_lowercase : Union[str, Any] = self.vqvae.quantize.get_codebook_entry(UpperCamelCase , shape=UpperCamelCase )
_lowercase : Union[str, Any] = self.vqvae.decode(UpperCamelCase , force_not_quantize=UpperCamelCase ).sample
_lowercase : Any = (image / 2 + 0.5).clamp(0 , 1 )
_lowercase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowercase : int = self.numpy_to_pil(UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase )
def lowerCAmelCase_ ( self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : float ):
"""simple docstring"""
_lowercase , _lowercase : int = torch.sort(UpperCamelCase , 1 , descending=UpperCamelCase )
_lowercase : Any = torch.exp(UpperCamelCase )
_lowercase : Optional[Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_lowercase : Any = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase )
_lowercase : List[Any] = torch.cat((all_true, keep_mask) , dim=1 )
_lowercase : Tuple = keep_mask[:, :-1, :]
_lowercase : Union[str, Any] = keep_mask.gather(1 , indices.argsort(1 ) )
_lowercase : int = log_p_x_0.clone()
_lowercase : List[str] = -torch.inf # -inf = log(0)
return rv
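# Standalone sketch of the truncation step above (torch is imported at the
# top of this file): keep the smallest set of classes whose cumulative
# probability stays below the truncation rate, always retain the single most
# likely class, and send everything else to log(0) = -inf. Shapes follow the
# pipeline: (batch, classes, pixels).
_log_p = torch.log_softmax(torch.randn(1, 5, 3), dim=1)
_sorted_lp, _indices = torch.sort(_log_p, 1, descending=True)
_keep = torch.exp(_sorted_lp).cumsum(dim=1) < 0.9
_keep = torch.cat((torch.full_like(_keep[:, :1, :], True), _keep), dim=1)[:, :-1, :]
_keep = _keep.gather(1, _indices.argsort(1))  # undo the descending sort
_truncated = _log_p.clone()
_truncated[~_keep] = float('-inf')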
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int ):
A__ = generate_pascal_triangle(UpperCAmelCase_ )
for row_idx in range(UpperCAmelCase_ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=""" """ )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """ )
else:
print(triangle[row_idx][col_idx] , end="""""" )
print()
def _snake_case ( UpperCAmelCase_ : int ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
A__ = []
for current_row_idx in range(UpperCAmelCase_ ):
A__ = populate_current_row(UpperCAmelCase_ , UpperCAmelCase_ )
triangle.append(UpperCAmelCase_ )
return triangle
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int ):
A__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A__ , A__ = 1, 1
for current_col_idx in range(1 , UpperCAmelCase_ ):
calculate_current_element(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return current_row
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , ):
A__ = triangle[current_row_idx - 1][current_col_idx - 1]
A__ = triangle[current_row_idx - 1][current_col_idx]
A__ = above_to_left_elt + above_to_right_elt
def _snake_case ( UpperCAmelCase_ : int ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise TypeError("""The input value of 'num_rows' should be 'int'""" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""" )
A__ = [[1]]
for row_index in range(1 , UpperCAmelCase_ ):
A__ = [0] + result[-1] + [0]
A__ = row_index + 1
# Calculate the number of distinct elements in a row
A__ = sum(divmod(UpperCAmelCase_ , 2 ) )
A__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
A__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A__ = row_first_half + row_second_half
result.append(UpperCAmelCase_ )
return result
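# Worked example of the symmetry trick above for row index 4 (the fifth row):
# pad row 3 with zeros, sum adjacent pairs to get the distinct leading half,
# then mirror the first (row_index + 1) // 2 entries to finish the row.
_prev = [0, 1, 3, 3, 1, 0]                 # row 3 padded with zeros
_distinct = sum(divmod(4 + 1, 2))          # 3 distinct values in row 4
_first_half = [_prev[i - 1] + _prev[i] for i in range(1, _distinct + 1)]
_second_half = _first_half[: (4 + 1) // 2][::-1]
assert _first_half + _second_half == [1, 4, 6, 4, 1]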
def _snake_case ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(UpperCAmelCase_ : Callable , UpperCAmelCase_ : int ) -> None:
A__ = F"""{func.__name__}({value})"""
A__ = timeit(F"""__main__.{call}""" , setup="""import __main__""" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(UpperCAmelCase_ , UpperCAmelCase_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class a ( _lowerCamelCase ):
"""simple docstring"""
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCamelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(UpperCamelCase , """num_encoder_blocks""" ) )
class a :
"""simple docstring"""
def __init__( self: str , UpperCamelCase: Dict , UpperCamelCase: int=13 , UpperCamelCase: Optional[int]=64 , UpperCamelCase: List[Any]=3 , UpperCamelCase: List[Any]=4 , UpperCamelCase: Optional[Any]=[2, 2, 2, 2] , UpperCamelCase: Any=[8, 4, 2, 1] , UpperCamelCase: Optional[int]=[16, 32, 64, 1_28] , UpperCamelCase: str=[1, 4, 8, 16] , UpperCamelCase: Dict=[1, 2, 4, 8] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: List[str]="gelu" , UpperCamelCase: Tuple=0.1 , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: Tuple=0.02 , UpperCamelCase: int=3 , UpperCamelCase: str=None , ):
"""simple docstring"""
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_encoder_blocks
A__ = sr_ratios
A__ = depths
A__ = hidden_sizes
A__ = downsampling_rates
A__ = num_attention_heads
A__ = is_training
A__ = use_labels
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = num_labels
A__ = scope
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
A__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: str , UpperCamelCase: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = SegformerModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
A__ = A__ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCamelCase ( self: List[str] , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: List[str] ):
"""simple docstring"""
A__ = self.num_labels
A__ = SegformerForSemanticSegmentation(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = model(UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A__ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: List[str] , UpperCamelCase: str , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = 1
A__ = SegformerForSemanticSegmentation(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
A__ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(UpperCamelCase )
A__ = model(UpperCamelCase , labels=UpperCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ = config_and_inputs
A__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = SegformerModelTester(self )
A__ = SegformerConfigTester(self , config_class=UpperCamelCase )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
A__ = sum(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A__ = (self.model_tester.image_size // 32) ** 2
A__ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A__ = len(UpperCamelCase )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase ) )
A__ = outputs.attentions
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first attentions (first block, first layer)
A__ = (self.model_tester.image_size // 4) ** 2
A__ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: Dict ):
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) )
A__ = outputs.hidden_states
A__ = self.model_tester.num_encoder_blocks
self.assertEqual(len(UpperCamelCase ) , UpperCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase ):
continue
A__ = model_class(UpperCamelCase )
model.to(UpperCamelCase )
model.train()
A__ = self._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase )
A__ = model(**UpperCamelCase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
pass
@slow
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = SegformerModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def _snake_case ( ):
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
A__ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
A__ = encoded_inputs.pixel_values.to(UpperCamelCase )
with torch.no_grad():
A__ = model(UpperCamelCase )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
A__ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
A__ = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
A__ = encoded_inputs.pixel_values.to(UpperCamelCase )
with torch.no_grad():
A__ = model(UpperCamelCase )
A__ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
A__ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase , atol=1e-1 ) )
@slow
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
A__ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
UpperCamelCase )
A__ = prepare_img()
A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" )
A__ = encoded_inputs.pixel_values.to(UpperCamelCase )
with torch.no_grad():
A__ = model(UpperCamelCase )
A__ = outputs.logits.detach().cpu()
A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase , target_sizes=[(5_00, 3_00)] )
A__ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , UpperCamelCase )
A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase )
A__ = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , UpperCamelCase )
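# Minimal end-to-end sketch tying the tests above together (assumes the same
# public checkpoint and reuses the `prepare_img` helper referenced in the
# tests). SegFormer emits logits at 1/4 of the input resolution, so they are
# upsampled to the target size before taking the per-pixel argmax.
def _segformer_inference_sketch():
    processor = SegformerImageProcessor.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512')
    model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512')
    inputs = processor(images=prepare_img(), return_tensors='pt')
    with torch.no_grad():
        logits = model(**inputs).logits  # (1, num_labels, 1_28, 1_28) for a 512x512 input
    upsampled = torch.nn.functional.interpolate(
        logits, size=(5_12, 5_12), mode='bilinear', align_corners=False
    )
    return upsampled.argmax(dim=1)[0]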
import math
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 0.1 ):
snake_case_ = 3
snake_case_ = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(SCREAMING_SNAKE_CASE__ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
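    # Quick sanity checks for the 6k +/- 1 trial division above (using the
    # `is_prime` name referenced at the call site inside the solver):
    assert is_prime(2) and is_prime(3) and is_prime(29)
    assert not is_prime(1) and not is_prime(25) and not is_prime(49)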
'''simple docstring'''
import math
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
_SCREAMING_SNAKE_CASE : List[str] = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1 , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = factor * value
_SCREAMING_SNAKE_CASE : Optional[Any] = value
while not is_prime(SCREAMING_SNAKE_CASE__ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE__ )
return value
import warnings
from ..trainer import Trainer
from ..utils import logging
__A : Tuple = logging.get_logger(__name__)
class lowercase_ ( lowerCAmelCase__ ):
def __init__( self: List[Any], _lowercase: Dict=None, **_lowercase: str):
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""", _lowercase, )
super().__init__(args=_lowercase, **_lowercase)
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__A : Optional[Any] = logging.getLogger(__name__)
if __name__ == "__main__":
__A : int = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30_522, type=int)
__A : Optional[Any] = parser.parse_args()
logger.info(f"""Loading data from {args.data_file}""")
with open(args.data_file, "rb") as fp:
__A : Union[str, Any] = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
__A : Optional[Any] = Counter()
for tk_ids in data:
counter.update(tk_ids)
__A : int = [0] * args.vocab_size
for k, v in counter.items():
__A : Optional[int] = v
logger.info(f"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
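    # Downstream sketch (an assumption, not part of this script): the dumped
    # counts are typically converted into smoothed masking probabilities with
    # a fractional power, in the spirit of word2vec subsampling; 0.7 mirrors
    # the smoothing exponent used by the DistilBERT training scripts.
    import numpy as np

    _counts = np.array(counts, dtype=np.float64)
    _token_probs = np.maximum(_counts, 1) ** -0.7  # rarer tokens get more mass
    _token_probs /= _token_probs.sum()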
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
SCREAMING_SNAKE_CASE : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_A = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
class __lowerCAmelCase( lowerCAmelCase__ ):
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
'''simple docstring'''
import argparse
import struct
import unittest
class __lowerCAmelCase:
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : bytes ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :str = data
# Initialize hash values
SCREAMING_SNAKE_CASE_ :Any = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
SCREAMING_SNAKE_CASE_ :Optional[int] = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
SCREAMING_SNAKE_CASE_ :Optional[int] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowercase ( SCREAMING_SNAKE_CASE : bytes ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[str] = b'\x80' + (b'\x00' * (63 - (len(SCREAMING_SNAKE_CASE ) + 8) % 64))
SCREAMING_SNAKE_CASE_ :int = struct.pack('>Q' , (len(SCREAMING_SNAKE_CASE ) * 8) )
return data + padding + big_endian_integer
def _lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
SCREAMING_SNAKE_CASE_ :Any = list(struct.unpack('>16L' , SCREAMING_SNAKE_CASE ) )
            # extend the 16 parsed words with 48 zeroed entries to form the
            # full 64-word message schedule
words += [0] * 48
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[str] = self.hashes
for index in range(0 , 64 ):
if index > 15:
                    # fill each extended message-schedule word from earlier entries
SCREAMING_SNAKE_CASE_ :Optional[int] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
SCREAMING_SNAKE_CASE_ :List[str] = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
SCREAMING_SNAKE_CASE_ :Optional[int] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
SCREAMING_SNAKE_CASE_ :Optional[Any] = self.ror(SCREAMING_SNAKE_CASE , 6 ) ^ self.ror(SCREAMING_SNAKE_CASE , 11 ) ^ self.ror(SCREAMING_SNAKE_CASE , 25 )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
SCREAMING_SNAKE_CASE_ :List[Any] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
SCREAMING_SNAKE_CASE_ :List[str] = self.ror(SCREAMING_SNAKE_CASE , 2 ) ^ self.ror(SCREAMING_SNAKE_CASE , 13 ) ^ self.ror(SCREAMING_SNAKE_CASE , 22 )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = (a & b) ^ (a & c) ^ (b & c)
SCREAMING_SNAKE_CASE_ :str = (sa + maj) % 0x1_00_00_00_00
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Tuple = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
SCREAMING_SNAKE_CASE_ :Optional[Any] = [a, b, c, d, e, f, g, h]
# Modify final values
SCREAMING_SNAKE_CASE_ :List[Any] = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
SCREAMING_SNAKE_CASE_ :List[str] = ''.join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for value in self.hashes] )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
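# Sanity check for the 32-bit rotate-right above (a standalone helper
# mirroring `ror`): rotating 0x00_00_00_01 right by one position wraps the
# low bit around to the top, giving 0x80_00_00_00.
def _ror32(value: int, rotations: int) -> int:
    return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)

assert _ror32(0x00_00_00_01, 1) == 0x80_00_00_00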
class __lowerCAmelCase( unittest.TestCase ):
def _lowercase ( self : Union[str, Any] ):
"""simple docstring"""
import hashlib
SCREAMING_SNAKE_CASE_ :List[Any] = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(SCREAMING_SNAKE_CASE ).hash , hashlib.shaaaa(SCREAMING_SNAKE_CASE ).hexdigest() )
def SCREAMING_SNAKE_CASE__ ( ):
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_ :str = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
SCREAMING_SNAKE_CASE_ :str = parser.parse_args()
SCREAMING_SNAKE_CASE_ :str = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ :Optional[int] = f.read()
else:
SCREAMING_SNAKE_CASE_ :str = bytes(SCREAMING_SNAKE_CASE , 'utf-8' )
print(SHAaaa(SCREAMING_SNAKE_CASE ).hash )
if __name__ == "__main__":
main()
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase_ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def a__ ( snake_case ):
"""simple docstring"""
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def a__ ( snake_case ):
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case )
def a__ ( snake_case ):
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
__SCREAMING_SNAKE_CASE : List[Any] = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(snake_case , id=snake_case )
def a__ ( snake_case , snake_case ):
"""simple docstring"""
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
# Doctest custom flag to ignore output.
lowercase_ = doctest.register_optionflag("""IGNORE_RESULT""")
lowercase_ = doctest.OutputChecker
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self : int , _A : Any , _A : Tuple , _A : Optional[int] ):
"""simple docstring"""
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , _A , _A , _A )
lowercase_ = CustomOutputChecker
lowercase_ = HfDoctestModule
lowercase_ = HfDocTestParser
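# Usage sketch for the custom doctest flag registered above: marking an
# example with `+IGNORE_RESULT` makes the output checker accept any output,
# which is handy for nondeterministic values in docstring examples
# (`sample_seed` is a hypothetical function, shown only for illustration):
#
#     def sample_seed() -> float:
#         """
#         >>> sample_seed()  # doctest: +IGNORE_RESULT
#         0.8444218515250481
#         """
#         import random
#
#         return random.random()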
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = MobileBertTokenizer
lowerCAmelCase_ = MobileBertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
lowerCAmelCase_ = '''google/mobilebert-uncased'''
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE : List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE : int = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# With lower casing
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__SCREAMING_SNAKE_CASE : Dict = {}
for i, token in enumerate(_A ):
__SCREAMING_SNAKE_CASE : List[str] = i
__SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False
__SCREAMING_SNAKE_CASE : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
list_of_common_chinese_char = ['''的''', '''人''', '''有''']
text_with_chinese_char = ''''''.join(list_of_common_chinese_char )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
kwargs['''tokenize_chinese_chars'''] = True
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that each Chinese character is not preceded by "##"
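# e.g. with tokenize_chinese_chars=True, "的人有" tokenizes to ["的", "人", "有"]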
self.assertListEqual(tokens_without_spe_char_p , list_of_common_chinese_char )
self.assertListEqual(tokens_without_spe_char_r , list_of_common_chinese_char )
kwargs['''tokenize_chinese_chars'''] = False
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
# it is expected that only the first Chinese character is not preceded by "##".
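# e.g. with tokenize_chinese_chars=False, "的人有" tokenizes to ["的", "##人", "##有"]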
expected_tokens = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char )
]
self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
| 74
| 1
|
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class __snake_case ( TokenizerTesterMixin ,unittest.TestCase):
"""simple docstring"""
tokenizer_class = WavaVecaPhonemeCTCTokenizer
test_rust_tokenizer = False
def __lowercase ( self : Union[str, Any] ) -> int:
super().setUp()
vocab = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
self.special_tokens_map = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(vocab_tokens ) + """\n""" )
def get_clean_sequence(self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
if max_length is not None and len(toks ) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
while len(toks ) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
if " " not in output_txt and len(toks_ids ) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
)
if with_prefix_space:
output_txt = """ """ + output_txt
output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
return output_txt, output_ids
def get_tokenizer(self , **kwargs ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def __lowercase ( self : int ) -> Any:
tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
token_ids = tokenizer("""m xxx ɪ""" , do_phonemize=False ).input_ids
self.assertEqual(token_ids , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
token_ids = tokenizer("""m aaa ɪ ccc""" , do_phonemize=False ).input_ids
self.assertEqual(token_ids , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
token_ids = tokenizer("""maɪ c""" , do_phonemize=False ).input_ids
self.assertEqual(token_ids , [3, 2_00] ) # mai should be <unk> (=3)
def __lowercase ( self : Any ) -> int:
tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
input_text = """Hello how are you"""
phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
self.assertEqual(phonemes , """h ə l oʊ h aʊ ɑːɹ j uː""" )
def __lowercase ( self : List[str] ) -> List[Any]:
tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
input_text = """Hello how are you"""
phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(input_text ).input_ids , tokenizer(phonemes , do_phonemize=False ).input_ids )
def __lowercase ( self : Union[str, Any] ) -> List[Any]:
tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
input_text = """Hello how are you"""
phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
phonemes_enc_dec = tokenizer.decode(tokenizer(input_text ).input_ids )
self.assertEqual(phonemes , phonemes_enc_dec )
def __lowercase ( self : Optional[int] ) -> int:
tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
tokens = tokenizer.decode(sample_ids[0] )
batch_tokens = tokenizer.batch_decode(sample_ids )
self.assertEqual(tokens , batch_tokens[0] )
self.assertEqual(batch_tokens , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def __lowercase ( self : str ) -> Any:
tokenizer = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
input_text = """Hello how are you"""
phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
self.assertEqual(phonemes , """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def __lowercase ( self : Optional[Any] ) -> Optional[int]:
tokenizer = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
input_text = """Hello how are you"""
phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(input_text ).input_ids , tokenizer(phonemes , do_phonemize=False ).input_ids )
def __lowercase ( self : Tuple ) -> Optional[Any]:
tokenizer = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
tokens = tokenizer.decode(sample_ids[0] )
batch_tokens = tokenizer.batch_decode(sample_ids )
self.assertEqual(tokens , batch_tokens[0] )
self.assertEqual(batch_tokens , ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
tokens = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=False )
batch_tokens = tokenizer.batch_decode(sample_ids , filter_word_delimiter_token=False )
self.assertEqual(tokens , batch_tokens[0] )
self.assertEqual(batch_tokens , ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def __lowercase ( self : Union[str, Any] ) -> List[str]:
tokenizer = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
input_text = """Hello how are you"""
phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
phonemes_enc_dec = tokenizer.decode(tokenizer(input_text ).input_ids , filter_word_delimiter_token=False )
self.assertEqual(phonemes , phonemes_enc_dec )
def __lowercase ( self : Any ) -> Optional[Any]:
tokenizer = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
input_text = """Hello how are you"""
phonemes = tokenizer.phonemize(input_text , phonemizer_lang="""en-us""" )
phonemes_enc_dec = tokenizer.decode(tokenizer(input_text ).input_ids , filter_word_delimiter_token=True )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip() , phonemes_enc_dec )
def __lowercase ( self : Any ) -> Tuple:
tokenizer = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""" , word_delimiter_token=None )
input_text = """Hello how are you"""
input_ids_en = tokenizer(input_text , phonemizer_lang="""en-us""" ).input_ids
input_ids_fr = tokenizer(input_text , phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(input_ids_en , input_ids_fr )
text_en = tokenizer.decode(input_ids_en )
text_fr = tokenizer.decode(input_ids_fr )
self.assertEqual(text_en , """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(text_fr , """ɛ l o h aʊ a ʁ j u""" )
def __lowercase ( self : str ) -> List[Any]:
tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
text_up = """Hello how Are you"""
text_low = """hello how are you"""
input_ids_up = tokenizer(text_up ).input_ids
input_ids_low = tokenizer(text_low ).input_ids
self.assertEqual(input_ids_up , input_ids_low )
def __lowercase ( self : Union[str, Any] ) -> str:
tokenizer = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
batch_tokens = tokenizer.batch_decode(sample_ids )
self.assertEqual(batch_tokens , ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def get_from_offsets(offsets , key ):
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def __lowercase ( self : Tuple ) -> List[str]:
tokenizer = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
outputs = tokenizer.decode(sample_ids , output_char_offsets=True , filter_word_delimiter_token=False )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(outputs , WavaVecaPhonemeCTCTokenizerOutput ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """char""" ) , ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """start_offset""" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""] , """end_offset""" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
tokenizer = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(outputs_batch , outputs_list ):
self.assertTrue(isinstance(outputs_batch , WavaVecaPhonemeCTCTokenizerOutput ) )
self.assertTrue(isinstance(outputs_list[0] , WavaVecaPhonemeCTCTokenizerOutput ) )
# transform list to ModelOutput
outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""] , outputs_batch_a["""text"""] )
def recursive_check(list_a , list_b ):
if isinstance(list_a , list ):
[recursive_check(la , lb ) for la, lb in zip(list_a , list_b )]
self.assertEqual(list_a , list_b )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""] , outputs_batch_a["""char_offsets"""] )
# fmt: off
sample_ids = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
outputs_char_batch = tokenizer.batch_decode(sample_ids , output_char_offsets=True )
outputs_char = [tokenizer.decode(ids , output_char_offsets=True ) for ids in sample_ids]
check_list_tuples_equal(outputs_char_batch , outputs_char )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def __lowercase ( self : List[Any] ) -> Dict:
pass
@unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
def __lowercase ( self : Optional[int] ) -> List[str]:
pass
@unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
def __lowercase ( self : Any ) -> Optional[Any]:
pass
@unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
def __lowercase ( self : Dict ) -> str:
pass
def __lowercase ( self : Tuple ) -> List[str]:
tokenizers = self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer )
self.assertNotEqual(vocab_size , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
added_toks = tokenizer.add_tokens(new_toks )
vocab_size_a = tokenizer.vocab_size
all_size_a = len(tokenizer )
self.assertNotEqual(vocab_size_a , 0 )
self.assertEqual(vocab_size , vocab_size_a )
self.assertEqual(added_toks , len(new_toks ) )
self.assertEqual(all_size_a , all_size + len(new_toks ) )
tokens = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=False )
self.assertGreaterEqual(len(tokens ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
new_toks_a = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
added_toks_a = tokenizer.add_special_tokens(new_toks_a )
vocab_size_b = tokenizer.vocab_size
all_size_b = len(tokenizer )
self.assertNotEqual(vocab_size_b , 0 )
self.assertEqual(vocab_size , vocab_size_b )
self.assertEqual(added_toks_a , len(new_toks_a ) )
self.assertEqual(all_size_b , all_size_a + len(new_toks_a ) )
tokens = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=False )
self.assertGreaterEqual(len(tokens ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def __lowercase ( self : Tuple ) -> Any:
pass
@unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
def __lowercase ( self : Tuple ) -> Optional[int]:
pass
def __lowercase ( self : Any ) -> List[str]:
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCAmelCase_ : Union[str, Any] = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
lowerCAmelCase_ : Tuple = tokenizer.convert_tokens_to_string(lowerCamelCase )
self.assertIsInstance(output["""text"""] , lowerCamelCase )
| 398
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A : Union[str, Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
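# Note: _LazyModule defers the heavy submodule imports declared above until an attribute is
# first accessed, so e.g. `from transformers.models.fnet import FNetModel` only pulls in the
# torch-dependent modeling code at that point.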
| 398
| 1
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
'''simple docstring'''
def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , out_indices=[0, 1, 2, 3] , ):
self.parent = parent
self.vocab_size = 100
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.scope = scope
self.out_indices = out_indices
self.num_labels = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
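# e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 226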
def prepare_config_and_inputs( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
pixel_labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
config = self.get_config()
return config, pixel_values, labels, pixel_labels
def get_config( self ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
model = BeitModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_masked_lm( self , config , pixel_values , labels , pixel_labels ):
model = BeitForMaskedImageModeling(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
config.num_labels = self.type_sequence_label_size
model = BeitForImageClassification(config )
model.to(torch_device )
model.eval()
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
config.num_channels = 1
model = BeitForImageClassification(config )
model.to(torch_device )
model.eval()
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
config.num_labels = self.num_labels
model = BeitForSemanticSegmentation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels, pixel_labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def lowerCAmelCase_ ( self : List[str] ):
self.model_tester = BeitModelTester(self )
self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
def lowerCAmelCase_ ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason='BEiT does not use inputs_embeds' )
def lowerCAmelCase_ ( self : str ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowerCAmelCase_ ( self : Dict ):
pass
def lowerCAmelCase_ ( self : List[str] ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCAmelCase_ ( self : List[str] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def lowerCAmelCase_ ( self : Any ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def lowerCAmelCase_ ( self : Optional[Any] ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def lowerCAmelCase_ ( self : Tuple ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
def lowerCAmelCase_ ( self : Any ):
if not self.model_tester.is_training:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
continue
model = model_class(config )
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def lowerCAmelCase_ ( self : Optional[int] ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
config.use_cache = False
config.return_dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
model = model_class(config )
model.gradient_checkpointing_enable()
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def lowerCAmelCase_ ( self : Any ):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BeitModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img():
'''simple docstring'''
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ ( self : Optional[Any] ):
return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ ( self : Any ):
model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values.to(torch_device )
# prepare bool_masked_pos
bool_masked_pos = torch.ones((1, 196) , dtype=torch.bool ).to(torch_device )
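# 196 = 14 * 14, the number of 16x16 patches in a 224x224 image; here every patch is masked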
# forward pass
with torch.no_grad():
outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(torch_device )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2 ) )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(torch_device )
self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
expected_class_idx = 281
self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
def lowerCAmelCase_ ( self : Dict ):
model = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to(
torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor([1.6881, -0.2787, 0.5901] ).to(torch_device )
self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
expected_class_idx = 2_396
self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
def lowerCAmelCase_ ( self : Dict ):
model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
model = model.to(torch_device )
image_processor = BeitImageProcessor(do_resize=True , size=640 , do_center_crop=False )
ds = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
image = Image.open(ds[0]['file'] )
inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , expected_shape )
is_pillow_less_than_a = version.parse(PIL.__version__ ) < version.parse('9.0.0' )
if is_pillow_less_than_a:
expected_slice = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=torch_device , )
else:
expected_slice = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=torch_device , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' )
model = model.to(torch_device )
image_processor = BeitImageProcessor(do_resize=True , size=640 , do_center_crop=False )
ds = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' )
image = Image.open(ds[0]['file'] )
inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(500, 300)] )
expected_shape = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , expected_shape )
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
expected_shape = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , expected_shape )
| 7
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('''fixtures''')
class __lowerCamelCase (unittest.TestCase ):
def snake_case_ ( self: int ):
'''simple docstring'''
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request',return_value=response_mock ) as mock_head:
_ = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class __lowerCamelCase (unittest.TestCase ):
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def snake_case_ ( cls: Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token,repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def snake_case_ ( self: Tuple ):
'''simple docstring'''
feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub('test-feature-extractor',use_auth_token=self._token )
new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v,getattr(new_feature_extractor,k ) )
# Reset repo
delete_repo(token=self._token,repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
tmp_dir,repo_id='test-feature-extractor',push_to_hub=True,use_auth_token=self._token )
new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v,getattr(new_feature_extractor,k ) )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
feature_extractor = WavaVecaFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub('valid_org/test-feature-extractor',use_auth_token=self._token )
new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v,getattr(new_feature_extractor,k ) )
# Reset repo
delete_repo(token=self._token,repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
tmp_dir,repo_id='valid_org/test-feature-extractor-org',push_to_hub=True,use_auth_token=self._token )
new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v,getattr(new_feature_extractor,k ) )
def snake_case_ ( self: int ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
feature_extractor.push_to_hub('test-dynamic-feature-extractor',use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,{'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},)
new_feature_extractor = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''',trust_remote_code=True )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__,'CustomFeatureExtractor' )
| 1
| 0
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def get_maskformer_config(model_name: str ):
'''simple docstring'''
backbone_config = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
config = MaskFormerConfig(backbone_config=backbone_config )
repo_id = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
config.num_labels = 847
filename = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
config.num_labels = 150
filename = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
config.num_labels = 171
filename = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
config.num_labels = 133
filename = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
config.num_labels = 19
filename = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
config.num_labels = 65
filename = """mapillary-vistas-id2label.json"""
idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
return config
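# For example (hypothetical invocation): get_maskformer_config("maskformer-swin-tiny-ade").num_labels == 150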
def create_rename_keys(config ):
'''simple docstring'''
rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct ,old ,new ):
'''simple docstring'''
val = dct.pop(old )
dct[new] = val
def read_in_swin_q_k_v(state_dict ,backbone_config ):
'''simple docstring'''
num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
in_proj_bias = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
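# the fused qkv matrix has shape (3 * dim, dim): rows [0:dim] are q, [dim:2*dim] are k, [2*dim:3*dim] are v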
state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
dim : dim * 2, :
]
state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
dim : dim * 2
]
state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
-dim :, :
]
state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v(state_dict ,config ):
'''simple docstring'''
hidden_size = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
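# the packed in_proj layout matches nn.MultiheadAttention: [q; k; v] stacked along dim 0, each of size hidden_size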
state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'''] = in_proj_bias[:hidden_size]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img():
'''simple docstring'''
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url ,stream=True ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = "ade" in model_name
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
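# Usage sketch (hypothetical paths; the script filename is an assumption, adjust to
# wherever this module lives):
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade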
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes=[3, 3, 5, 3, 5, 5, 3],
        in_channels=[32, 16, 24, 40, 80, 112, 192],
        out_channels=[16, 24, 40, 80, 112, 192, 320],
        depthwise_padding=[],
        strides=[1, 2, 2, 2, 1, 2, 1],
        num_block_repeats=[1, 2, 2, 3, 3, 4, 1],
        expand_ratios=[1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
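# A minimal sketch of the composite-config pattern used above: sub-configs are held
# as objects but serialized back to plain dicts in to_dict(), so the whole config
# round-trips through JSON. The class and field names below are toy examples, not
# part of the ALIGN code.
class _ToySubConfig:
    def __init__(self, hidden_size=8):
        self.hidden_size = hidden_size

    def to_dict(self):
        return dict(self.__dict__)


class _ToyCompositeConfig:
    def __init__(self, sub_config=None):
        # accept either a dict or nothing, mirroring AlignConfig's handling
        self.sub_config = _ToySubConfig(**(sub_config or {}))

    def to_dict(self):
        return {"sub_config": self.sub_config.to_dict()}


_cfg = _ToyCompositeConfig({"hidden_size": 16})
_round_tripped = _ToyCompositeConfig(_cfg.to_dict()["sub_config"])
assert _round_tripped.sub_config.hidden_size == 16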
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
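# Usage sketch, assuming `transformers` is installed; the checkpoint name below is a
# commonly used captioning model and is an assumption, not part of this module.
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
# print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# # -> [{"generated_text": "two cats sleeping on a couch"}] (illustrative output)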
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
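# Usage sketch: the processor fans a (text, image) pair out to its tokenizer and
# image processor and merges the results into one encoding. The checkpoint name is
# an assumption for illustration only.
# from transformers import AltCLIPProcessor
# from PIL import Image
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")
# # `inputs` now carries input_ids/attention_mask from the tokenizer plus pixel_values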
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[ChannelDimension] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
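# A small numeric check of the padding rule above: (old // size + 1) * size - old
# always pads up to the *next* multiple of `size`, so an edge that is already
# divisible still gains a full extra block of `size` pixels. This mirrors the
# implementation's behaviour; the helper name is illustrative only.
def _pad_amount(old: int, size: int) -> int:
    return (old // size + 1) * size - old


assert _pad_amount(10, 8) == 6  # 10 -> 16
assert _pad_amount(16, 8) == 8  # 16 -> 24, a full extra block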
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
    def test_is_control(self):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
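# A minimal sketch of greedy longest-match-first WordPiece, the algorithm the
# WordpieceTokenizer tests above exercise. `_wordpiece` is an illustrative toy,
# not the transformers implementation.
def _wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        cur = None
        # try the longest remaining substring first, shrinking from the right
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # any unmatched span makes the whole word unknown
        tokens.append(cur)
        start = end
    return tokens


_vocab = {"un", "##want", "##ed"}
assert _wordpiece("unwanted", _vocab) == ["un", "##want", "##ed"]
assert _wordpiece("unwantedX", _vocab) == ["[UNK]"]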
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
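# Quick sanity check relating gamma to the factorial: for positive integers n,
# gamma(n) == (n - 1)!. Purely illustrative; it only uses the function above.
from math import factorial

for _n in range(1, 6):
    assert gamma(_n) == factorial(_n - 1)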
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
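# Tiny check of the escape-time normalisation above: points inside the set never
# diverge and map to 1.0; points far outside escape on the first step and map to 0.
assert get_distance(0, 0, 50) == 1.0  # the origin is in the Mandelbrot set
assert get_distance(2, 2, 50) == 0.0  # diverges immediately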
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }

    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
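# A minimal sketch of the positional weight transfer used above: when two models
# register identical architectures in the same order, zipping the ordered
# state-dict keys is enough. The toy modules below are illustrative only.
import torch as _torch_demo
from collections import OrderedDict as _OrderedDict_demo

_src = _torch_demo.nn.Linear(4, 2)
_dst = _torch_demo.nn.Linear(4, 2)
_transferred = _OrderedDict_demo(
    (dst_key, _src.state_dict()[src_key])
    for src_key, dst_key in zip(_src.state_dict().keys(), _dst.state_dict().keys())
)
_dst.load_state_dict(_transferred)
assert _torch_demo.equal(_dst.weight, _src.weight)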
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
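# A minimal sketch of the nested-dataclass pattern used above: __post_init__
# coerces a plain dict into the child config, so both dicts and config objects
# are accepted. Names below are toy examples, not part of the ESM code.
from dataclasses import dataclass as _dataclass_demo


@_dataclass_demo
class _ToyChild:
    depth: int = 2


@_dataclass_demo
class _ToyParent:
    child: _ToyChild = None

    def __post_init__(self):
        if self.child is None:
            self.child = _ToyChild()
        elif isinstance(self.child, dict):
            self.child = _ToyChild(**self.child)


assert _ToyParent({"depth": 5}).child.depth == 5
assert _ToyParent().child.depth == 2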
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU: pass positive values through, squash negatives towards -alpha."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
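# Usage sketch: positive inputs pass through unchanged, negative inputs are
# squashed towards -alpha.
_v = np.array([-2.0, 0.0, 3.0])
assert exponential_linear_unit(_v, alpha=1.0)[2] == 3.0
assert exponential_linear_unit(_v, alpha=1.0)[0] < 0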
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
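# Usage sketch (assumes the transformers agents/tools runtime is available; the
# call pattern below is an assumption, not defined in this module):
# tool = TextToSpeechTool()
# tool.setup()
# waveform = tool("Hello world")  # returns an audio waveform tensor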
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        tokens_order = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, tokens_order)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(tokens_order))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, tokens_order)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
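# A minimal, dependency-light sketch of the argument-reordering idea exercised by
# test_ensure_valid_input above: keep only the tokens that match the model's
# forward signature, in signature order, stopping at the first missing parameter.
# `_order_inputs` is an illustrative toy, not the library's `ensure_valid_input`.
import inspect


def _order_inputs(func, tokens: dict):
    ordered_names, args = [], []
    for param in list(inspect.signature(func).parameters)[1:]:  # skip `self`
        if param not in tokens:
            break  # a gap in the signature ends the contiguous prefix
        ordered_names.append(param)
        args.append(tokens[param])
    return ordered_names, tuple(args)


_names, _args = _order_inputs(
    FuncNonContiguousArgs.forward, {"input_ids": [1], "attention_mask": [1], "token_type_ids": [1]}
)
assert _names == ["input_ids"]  # stops at the unprovided `some_other_args`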
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--big_bird_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
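# Usage sketch (hypothetical paths; the script filename is an assumption):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path ./bigbird-pytorch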