| code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import numpy as np
def lowerCamelCase__ ( UpperCAmelCase_ )-> np.ndarray:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def lowerCamelCase__ ( UpperCAmelCase_ )-> np.ndarray:
"""simple docstring"""
return vector * sigmoid(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
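    # Quick sanity check (illustrative addition, not from the original module):
    # sigmoid maps 0 -> 0.5, and SiLU maps 0 -> 0 and 1 -> ~0.731.
    print(sigmoid(np.array([0.0, 1.0])))
    print(sigmoid_linear_unit(np.array([0.0, 1.0])))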
| 554 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1_536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6_144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
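# Usage sketch (illustrative addition): the defaults above correspond to the
# microsoft/deberta-v2-xlarge checkpoint, e.g.:
#   config = DebertaV2Config()
#   config.hidden_size, config.num_hidden_layers  # (1536, 24)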
| 554 | 1 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Given any two of the electron, hole and intrinsic carrier concentrations
    (pass 0 for the unknown one), solve the mass action law n * p = n_i**2
    for the missing value.
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
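    # Illustrative check (not in the original file): with intrinsic_conc = 1e10 and
    # electron_conc = 1e16, the mass action law n * p = n_i**2 gives hole_conc = 1e4.
    print(carrier_concentration(electron_conc=1e16, hole_conc=0, intrinsic_conc=1e10))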
| 718 |
from types import MethodType
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS) -> torch.Tensor:
    """Expand an image tensor with values in [0, 1] into a bit representation in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS) -> torch.Tensor:
    """Convert a bit tensor back into an image tensor with values in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
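# Round-trip note (illustrative, not in the original file): for an image tensor x
# in [0, 1], bits_to_decimal(decimal_to_bits(x)) recovers x up to the 1/255
# quantization introduced by the int cast, e.g.:
#   x = torch.rand(1, 3, 8, 8)
#   torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int().float() / 255)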
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """
    Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
    process from the learned model output (most often the predicted noise).
    """
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """
    Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
    process from the learned model output (most often the predicted noise).
    """
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale

        self.register_modules(unet=unet, scheduler=scheduler)
        # Patch the scheduler so that `step` runs the bit-space variants above.
        # Note: `MethodType` binds the scheduler as `self`, and the step functions
        # also read `self.bit_scale`, so it is mirrored onto the scheduler instance
        # (a small repair so the patched call actually works as intended).
        self.scheduler.bit_scale = bit_scale
        self.scheduler.step = MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step,
            self.scheduler,
        )

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
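# Usage sketch (illustrative; `unet` stands for a trained UNet2DConditionModel and
# is a placeholder, not a concrete checkpoint):
#   scheduler = DDIMScheduler(num_train_timesteps=1000)
#   pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]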
| 372 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
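# An illustrative launch (using the flags this script defines; assuming the file is
# saved as gradient_accumulation.py):
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16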
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset, using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 350 |
"""Convert DPT checkpoints from the original repository. URL: https://github.com/intel-isl/DPT"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
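# Example mapping (illustrative): rename_key("pretrained.model.blocks.0.attn.proj.weight")
# returns "dpt.encoder.layer.0.attention.output.dense.weight".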
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """
    Copy/paste/tweak model's weights to our DPT structure.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
A__ : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
A__ : Optional[int] =parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 207 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of a bug when downloading the new one
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )

| 59 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
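# Example (illustrative): parse_unknown_args(["--num_proc", "4", "--seed", "42"])
# returns {"num_proc": "4", "seed": "42"}.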
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main() | 59 | 1 |
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    # overwrite since phonemes require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)

        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)

        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)

        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids

        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)

        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _lowercase (self : List[str] ):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _lowercase (self : int ):
pass
def _lowercase (self : Any ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
UpperCAmelCase_ = self.get_tokenizers(fast=__a , do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
UpperCAmelCase_ = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(__a )
self.assertIsInstance(output["text"] , __a )
| 78 |
"""simple docstring"""
def solution( n = 60_08_51_47_51_43 )-> int:
    """Returns the largest prime factor of n."""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 554 | 0 |
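# Added illustration: the style_context above solves Project Euler 3 by trial
# division, dividing each prime factor out completely so the loop variable only
# ever lands on primes. A minimal standalone sketch of the same idea (the name
# `largest_prime_factor` is illustrative, not part of the dataset row):
def largest_prime_factor(n: int) -> int:
    ans = 1
    factor = 2
    while factor * factor <= n:
        while n % factor == 0:
            ans = factor        # remember the last factor that divided n
            n //= factor
        factor += 1
    return n if n > 1 else ans  # whatever remains above 1 is itself prime

assert largest_prime_factor(13195) == 29
assert largest_prime_factor(600851475143) == 6857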
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__( self , parent , vocab_size=9_9 , batch_size=1_3 , d_model=1_6 , decoder_seq_length=7 , is_training=True , is_decoder=True , use_attention_mask=True , use_cache=False , use_labels=True , decoder_start_token_id=2 , decoder_ffn_dim=3_2 , decoder_layers=4 , decoder_attention_heads=4 , max_position_embeddings=3_0 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = scope
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self , config , input_ids , attention_mask , lm_labels , ):
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs["past_key_values"]
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )["last_hidden_state"]
        output_from_past = model(next_tokens , past_key_values=past_key_values )["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp( self ):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
    # not implemented currently
    def test_inputs_embeds( self ):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base( self ):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base( self ):
        pass

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )

    def test_retain_grad_hidden_states_attentions( self ):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding" )  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self ):
        pass
| 714 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
    model_type = "M-CLIP"

    def __init__( self , transformerDimSize=1_0_2_4 , imageDimSize=7_6_8 , **kwargs ):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )


class MultilingualCLIP( PreTrainedModel ):
    config_class = MCLIPConfig

    def __init__( self , config , *args , **kwargs ):
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def forward( self , input_ids , attention_mask ):
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
| 552 | 0 |
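# Added illustration: the M-CLIP forward above mean-pools token embeddings with
# the attention mask so padding positions do not dilute the sentence vector.
# The same pooling step in isolation (pure PyTorch, illustrative names):
import torch

def masked_mean_pool(embs: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    mask = attention_mask.unsqueeze(-1).type_as(embs)  # (batch, seq_len, 1)
    summed = (embs * mask).sum(dim=1)                  # zero out padding, then sum
    counts = mask.sum(dim=1).clamp(min=1e-9)           # avoid division by zero
    return summed / counts

embs = torch.randn(2, 4, 8)
mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
pooled = masked_mean_pool(embs, mask)                  # (2, 8)
assert torch.allclose(pooled[0], embs[0, :3].mean(dim=0), atol=1e-6)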
def is_palindrome( num: int ) -> bool:
    return str(num ) == str(num )[::-1]


def sum_reverse( num: int ) -> int:
    return int(num ) + int(str(num )[::-1] )


def solution( limit: int = 1_0000 ) -> int:
    lychrel_nums = []
    for num in range(1 , limit ):
        iterations = 0
        a_number = num
        while iterations < 50:
            a_number = sum_reverse(a_number )
            iterations += 1
            if is_palindrome(a_number ):
                break
        else:
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
print(f"""{solution() = }""") | 352 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ , A_ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
A_ : List[Any] = "A painting of a squirrel eating a burger"
A_ : int = jax.device_count()
A_ : Union[str, Any] = num_samples * [prompt]
A_ : Dict = sd_pipe.prepare_inputs(snake_case )
A_ : List[Any] = replicate(snake_case )
A_ : str = shard(snake_case )
A_ : Any = jax.random.PRNGKey(0 )
A_ : Dict = jax.random.split(snake_case , jax.device_count() )
A_ : int = sd_pipe(snake_case , snake_case , snake_case , num_inference_steps=25 , jit=snake_case )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A_ : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ : Union[str, Any] = images[0, 253:256, 253:256, -1]
A_ : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ : List[str] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : str = "stabilityai/stable-diffusion-2"
A_ , A_ : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(snake_case , subfolder="scheduler" )
A_ , A_ : Any = FlaxStableDiffusionPipeline.from_pretrained(
            snake_case , scheduler=snake_case , revision="bf16" , dtype=jnp.bfloat16 , )
A_ : Any = scheduler_params
A_ : Optional[Any] = "A painting of a squirrel eating a burger"
A_ : Dict = jax.device_count()
A_ : List[str] = num_samples * [prompt]
A_ : int = sd_pipe.prepare_inputs(snake_case )
A_ : Optional[Any] = replicate(snake_case )
A_ : Dict = shard(snake_case )
A_ : Any = jax.random.PRNGKey(0 )
A_ : Union[str, Any] = jax.random.split(snake_case , jax.device_count() )
A_ : Union[str, Any] = sd_pipe(snake_case , snake_case , snake_case , num_inference_steps=25 , jit=snake_case )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
A_ : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
A_ : Union[str, Any] = images[0, 253:256, 253:256, -1]
A_ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
A_ : str = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 454 | 0 |
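# Added illustration: the Flax pipeline test above follows the usual pmap
# data-parallel recipe: replicate parameters to every device, shard the batch,
# and split the PRNG key per device. The same pattern with a toy function
# (illustrative sketch, assumes jax and flax are installed):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((4,))}
batch = jnp.arange(jax.device_count() * 8.0).reshape(-1, 4)  # divisible by device count

p_params = replicate(params)   # one copy of the params per device
p_batch = shard(batch)         # split the batch along a new leading device axis
rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())

@jax.pmap
def apply(params, x, rng):
    # each device sees its own batch slice and its own PRNG key
    x = x + jax.random.normal(rng, x.shape) * 1e-3
    return x @ params["w"]

out = apply(p_params, p_batch, rngs)  # shape: (num_devices, per_device_batch)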
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch( openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ) -> None:
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
a : str = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 422 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
a : Tuple = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder( nn.Module ):
    def __init__( self , args ):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )

    def forward( self , x ):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
UpperCAmelCase__ = [json.loads(__lowercase ) for l in open(__lowercase )]
UpperCAmelCase__ = os.path.dirname(__lowercase )
UpperCAmelCase__ = tokenizer
UpperCAmelCase__ = labels
UpperCAmelCase__ = len(__lowercase )
UpperCAmelCase__ = max_seq_length
UpperCAmelCase__ = transforms
def __len__( self ):
return len(self.data )
def __getitem__( self , __lowercase ):
UpperCAmelCase__ = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=__lowercase ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = sentence[0], sentence[1:-1], sentence[-1]
UpperCAmelCase__ = sentence[: self.max_seq_length]
UpperCAmelCase__ = torch.zeros(self.n_classes )
UpperCAmelCase__ = 1
UpperCAmelCase__ = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" )
UpperCAmelCase__ = self.transforms(__lowercase )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def A__ ( self ):
UpperCAmelCase__ = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->List[str]:
UpperCAmelCase__ = [len(row["""sentence"""] ) for row in batch]
UpperCAmelCase__ , UpperCAmelCase__ = len(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=torch.long )
UpperCAmelCase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase__ = input_row["""sentence"""]
UpperCAmelCase__ = 1
UpperCAmelCase__ = torch.stack([row["""image"""] for row in batch] )
UpperCAmelCase__ = torch.stack([row["""label"""] for row in batch] )
UpperCAmelCase__ = torch.stack([row["""image_start_token"""] for row in batch] )
UpperCAmelCase__ = torch.stack([row["""image_end_token"""] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def snake_case__ ( ) ->int:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def snake_case__ ( ) ->str:
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
| 422 | 1 |
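# Added illustration: the collate function above (mangled as `snake_case__` in
# the row) pads variable-length token sequences into one LongTensor and builds
# the matching attention mask. The same pattern in isolation (illustrative):
import torch

def pad_and_mask(sequences):
    lengths = [len(seq) for seq in sequences]
    batch, max_len = len(sequences), max(lengths)
    text = torch.zeros(batch, max_len, dtype=torch.long)  # 0 doubles as the pad id here
    mask = torch.zeros(batch, max_len, dtype=torch.long)
    for i, (seq, length) in enumerate(zip(sequences, lengths)):
        text[i, :length] = seq
        mask[i, :length] = 1
    return text, mask

text, mask = pad_and_mask([torch.tensor([5, 6, 7]), torch.tensor([8])])
assert mask.tolist() == [[1, 1, 1], [1, 0, 0]]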
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
snake_case__ = logging.getLogger(__name__)
snake_case__ = """Hello world! cécé herlolip"""
snake_case__ = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def convert_bertabs_checkpoints( path_to_checkpoints , dump_path ):
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir='''.''' , finetune_bert=False , large=False , share_emb=True , use_bert_emb=False , encoder='''bert''' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
    checkpoints = torch.load(path_to_checkpoints , lambda storage , loc : storage )
    original = AbsSummarizer(config , torch.device('''cpu''' ) , checkpoints )
    original.eval()
    new_model = BertAbsSummarizer(config , torch.device('''cpu''' ) )
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info('''convert the model''' )
    new_model.bert.load_state_dict(original.bert.state_dict() )
    new_model.decoder.load_state_dict(original.decoder.state_dict() )
    new_model.generator.load_state_dict(original.generator.state_dict() )
    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info('''Make sure that the models\' outputs are identical''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode('''This is sample éàalj\'-.''' )
    encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(encoder_input_ids )) )
    encoder_input_ids = torch.tensor(encoder_input_ids ).unsqueeze(0 )
    decoder_input_ids = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
    decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(decoder_input_ids )) )
    decoder_input_ids = torch.tensor(decoder_input_ids ).unsqueeze(0 )
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src , tgt , segs , clss , mask_src , mask_tgt , mask_cls )[0]
    output_original_generator = original.generator(output_original_model )
    output_converted_model = new_model(
        encoder_input_ids , decoder_input_ids , token_type_ids , encoder_attention_mask , decoder_attention_mask )[0]
    output_converted_generator = new_model.generator(output_converted_model )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('''Maximum absolute difference between weights: {:.2f}'''.format(maximum_absolute_difference ) )
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('''Maximum absolute difference between weights: {:.2f}'''.format(maximum_absolute_difference ) )
    are_identical = torch.allclose(output_converted_model , output_original_model , atol=1E-3 )
    if are_identical:
        logging.info('''all weights are equal up to 1e-3''' )
    else:
        raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info('''saving the model\'s state dictionary''' )
    torch.save(
        new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
snake_case__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 583 |
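# Added illustration: the conversion script above validates a weight transfer
# the standard way: run both models on identical inputs and bound the maximum
# absolute difference. The same check in miniature (illustrative sketch):
import torch

src = torch.nn.Linear(4, 4)
dst = torch.nn.Linear(4, 4)
dst.load_state_dict(src.state_dict())  # the "conversion"

x = torch.randn(2, 4)
max_diff = torch.max(torch.abs(src(x) - dst(x))).item()
assert torch.allclose(src(x), dst(x), atol=1e-3), f"max abs diff: {max_diff:.2e}"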
snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowerCamelCase_ ( ):
lowercase : Optional[Any] = input('''Enter message: ''' )
lowercase : Optional[Any] = input('''Enter key [alphanumeric]: ''' )
lowercase : Union[str, Any] = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
lowercase : str = '''encrypt'''
lowercase : Optional[Any] = encrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
elif mode.lower().startswith('''d''' ):
lowercase : str = '''decrypt'''
lowercase : Optional[int] = decrypt_message(UpperCAmelCase_ , UpperCAmelCase_ )
print(f'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , '''encrypt''' )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return translate_message(UpperCAmelCase_ , UpperCAmelCase_ , '''decrypt''' )
def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
lowercase : Optional[Any] = []
lowercase : Tuple = 0
lowercase : str = key.upper()
for symbol in message:
lowercase : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase_ ):
lowercase : List[str] = 0
else:
translated.append(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 583 | 1 |
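# Added illustration: a round trip with the Vigenère helpers above (assuming
# the definitions as reconstructed); decrypting with the same key inverts the
# encryption, and non-letters pass through unchanged:
ciphertext = encrypt_message("LEMON", "Attack at dawn!")
assert decrypt_message("LEMON", ciphertext) == "Attack at dawn!"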
"""simple docstring"""
from PIL import Image
def change_contrast( img: Image , level: int ) -> Image:
    factor = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
    def contrast( c: int ) -> int:
        return int(1_28 + factor * (c - 1_28) )
    return img.point(contrast )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
__lowerCAmelCase = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 712 |
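# Added note: for level = 170 the contrast factor above works out to
# 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85, so mid-gray
# (128) is the fixed point while values away from it are stretched hard and
# then clamped by PIL to the 0-255 range. A quick check:
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))
assert round(factor, 2) == 4.85
assert int(128 + factor * (128 - 128)) == 128  # mid-gray is unchanged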
"""simple docstring"""
from PIL import Image
def change_contrast( img: Image , level: int ) -> Image:
    factor = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
    def contrast( c: int ) -> int:
        return int(1_28 + factor * (c - 1_28) )
    return img.point(contrast )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
__lowerCAmelCase = change_contrast(img, 170)
    cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 396 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_for_image_classification( self ):
        image_processor = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
        model.to(torch_device )
        from datasets import load_dataset
        dataset = load_dataset('''nielsr/rvlcdip-demo''' )
        image = dataset['''train'''][0]['''image'''].convert('''RGB''' )
        inputs = image_processor(image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        expected_shape = torch.Size((1, 16) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
| 586 | """simple docstring"""
import math
import qiskit
def quantum_full_adder( input_1: int = 1 , input_2: int = 1 , carry_in: int = 1 ) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1 , str )
        or isinstance(input_2 , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_0_0_0 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
| 586 | 1 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'


class FlaxKarrasDiffusionSchedulers( Enum ):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput( BaseOutput ):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Optional[str] = None , subfolder: Optional[str] = None , return_unused_kwargs=False , **kwargs , ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
        scheduler, unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
        if hasattr(scheduler , '''create_state''' ) and getattr(scheduler , '''has_state''' , False ):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def save_pretrained( self , save_directory: Union[str, os.PathLike] , push_to_hub: bool = False , **kwargs ):
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )

    @property
    def compatibles( self ):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles( cls ):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
def broadcast_to_shape_from_left( x: jnp.ndarray , shape: Tuple[int] ) -> jnp.ndarray:
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )


def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.9_99 , dtype=jnp.float32 ) -> jnp.ndarray:
    def alpha_bar( time_step ):
        return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create( cls , scheduler ):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}" )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )


def get_sqrt_alpha_prod( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common( state: CommonSchedulerState , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common( state: CommonSchedulerState , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 701 |
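# Added illustration: `betas_for_alpha_bar` above derives each step's beta from
# the ratio of consecutive values of the cosine alpha-bar curve, clipped at
# `max_beta`. The same construction in plain NumPy (illustrative sketch):
import math
import numpy as np

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> np.ndarray:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
    return np.array(betas)

betas = cosine_betas(1000)
assert betas[0] < betas[-1] <= 0.999  # noise grows over the trajectory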
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list( shape , scale=1.0 , rng=None , name=None ):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , spectrogram_length=2048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=4_4100 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten( list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( __snake_case , unittest.TestCase ):
a: Union[str, Any] = TvltFeatureExtractor
def _A ( self: Optional[Any] ):
_a = TvltFeatureExtractionTester(self )
def _A ( self: Optional[Any] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__UpperCamelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''feature_size''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''hop_length''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''sampling_rate''' ) )
def _A ( self: List[str] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = feat_extract_first.save_pretrained(__UpperCamelCase )[0]
check_json_file_has_correct_format(__UpperCamelCase )
_a = self.feature_extraction_class.from_pretrained(__UpperCamelCase )
_a = feat_extract_first.to_dict()
_a = feat_extract_second.to_dict()
_a = dict_first.pop('''mel_filters''' )
_a = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _A ( self: List[str] ):
_a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(__UpperCamelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(__UpperCamelCase )
_a = self.feature_extraction_class.from_json_file(__UpperCamelCase )
_a = feat_extract_first.to_dict()
_a = feat_extract_second.to_dict()
_a = dict_first.pop('''mel_filters''' )
_a = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _A ( self: List[str] ):
# Initialize feature_extractor
_a = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
_a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_a = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs]
# Test not batched input
_a = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
_a = feature_extractor(__UpperCamelCase , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
_a = feature_extractor(
__UpperCamelCase , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=__UpperCamelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
_a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_a = np.asarray(__UpperCamelCase )
_a = feature_extractor(__UpperCamelCase , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _A ( self: Optional[int] , __UpperCamelCase: Dict ):
_a = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_a = ds.sort('''id''' ).select(range(__UpperCamelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _A ( self: Optional[Any] ):
_a = self._load_datasamples(1 )
_a = TvltFeatureExtractor()
_a = feature_extractor(__UpperCamelCase , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
_a = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __UpperCamelCase , atol=1E-4 ) )
| 346 | 0 |
import numpy as np
import qiskit
def bb84( key_len: int = 8 , seed: int | None = None ) -> str:
    """Simulates the BB84 protocol and returns the sifted key as a bit string."""
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis ):
        if alice_state[index] == 1:
            bb84_circ.x(index )
        if alice_basis[index] == 1:
            bb84_circ.h(index )
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bb84_circ.h(index )
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , """0""" )
    return key


if __name__ == "__main__":
    print(f'The generated key is : {bb84(8, seed=0)}')
from doctest import testmod
testmod()
| 15 |
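# Added note: the 6x oversampling in the BB84 code above exists because Alice
# and Bob keep a bit only when their random bases match, which happens with
# probability 1/2; with 6 * key_len raw qubits a shortfall below key_len is
# vanishingly unlikely. A quick estimate (illustrative sketch):
import numpy as np

rng = np.random.default_rng(0)
key_len = 8
alice_basis = rng.integers(2, size=6 * key_len)
bob_basis = rng.integers(2, size=6 * key_len)
sifted = int((alice_basis == bob_basis).sum())
assert sifted >= key_len  # ~24 of 48 raw bits survive sifting on average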
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_0_0  # TEMPERATURE (unit = K)


def builtin_voltage( donor_conc: float , acceptor_conc: float , intrinsic_conc: float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 1 |
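# Added note: with donor and acceptor concentrations of 1e17 cm^-3 each and
# silicon's intrinsic concentration of about 1e10 cm^-3 at 300 K, the formula
# above gives V_bi = (kT/q) * ln(1e34 / 1e20) ≈ 0.02585 * 32.24 ≈ 0.83 V, the
# familiar textbook value. As a check against the reconstructed function:
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
assert abs(v_bi - 0.833) < 0.01  # classic silicon p-n junction value at 300 K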
from __future__ import annotations
def shear_stress( stress: float , tangential_force: float , area: float , ) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 |
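# Added illustration: the function above solves for whichever of the three
# quantities was passed as zero. Quick usage (assuming the reconstructed name):
assert shear_stress(stress=0, tangential_force=25, area=100) == ("stress", 0.25)
assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)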
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class A__ :
lowerCAmelCase__ : Optional[int] = "dummy_data"
lowerCAmelCase__ : str = "datasets"
lowerCAmelCase__ : Dict = False
def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 0
__lowercase = dataset_name
__lowercase = cache_dir
__lowercase = use_local_dummy_data
__lowercase = config
# download_callbacks take a single url as input
__lowercase = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowercase = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowercase = str(_UpperCAmelCase )
# to be downloaded
__lowercase = None
__lowercase = None
@property
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
if self._dummy_file is None:
__lowercase = self.download_dummy_data()
return self._dummy_file
@property
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowercase = cached_path(
_UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase )
return os.path.join(_UpperCAmelCase , self.dummy_file_name )
@property
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self._bucket_url is None:
__lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowercase = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowercase = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase )
else:
return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase )
def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_UpperCAmelCase )
def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
return path
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return {}
def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for single_url in single_urls:
download_callback(_UpperCAmelCase )
else:
__lowercase = single_urls
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls]
else:
__lowercase = single_urls
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) )
__lowercase = value
# make sure that values are unique
if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
__lowercase = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url )
__lowercase = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
__lowercase = [data_url[0]] * len(_UpperCAmelCase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(_UpperCAmelCase )
return dummy_data_list
def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_UpperCAmelCase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def a__ ( self : int ) -> str:
"""simple docstring"""
pass
def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
def _iter_archive_members(_UpperCAmelCase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
__lowercase = Path(self.dummy_file ).parent
__lowercase = path.relative_to(_UpperCAmelCase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
__lowercase = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_UpperCAmelCase )
__lowercase = Path(_UpperCAmelCase )
__lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' )
def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = [paths]
for path in paths:
if os.path.isfile(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ):
if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(_UpperCAmelCase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
| 688 | 1 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp( tf.keras.optimizers.schedules.LearningRateSchedule ):
    """Applies a warmup schedule on a given learning rate decay schedule."""

    def __init__( self , initial_learning_rate: float , decay_schedule_fn: Callable , warmup_steps: int , power: float = 1.0 , name: str = None , ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__( self , step ):
        with tf.name_scope(self.name or """WarmUp""" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )

    def get_config( self ):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer (init_lr : float , num_train_steps : int , num_warmup_steps : int , min_lr_ratio : float = 0.0 , adam_beta1 : float = 0.9 , adam_beta2 : float = 0.9_9_9 , adam_epsilon : float = 1e-8 , adam_clipnorm : Optional[float] = None , adam_global_clipnorm : Optional[float] = None , weight_decay_rate : float = 0.0 , power : float = 1.0 , include_in_weight_decay : Optional[List[str]] = None , ):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay( Adam ):
    """Adam with optional decoupled weight decay applied to selected variables."""

    def __init__( self , learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.0_0_1 , beta_1: float = 0.9 , beta_2: float = 0.9_9_9 , epsilon: float = 1e-7 , amsgrad: bool = False , weight_decay_rate: float = 0.0 , include_in_weight_decay: Optional[List[str]] = None , exclude_from_weight_decay: Optional[List[str]] = None , name: str = "AdamWeightDecay" , **kwargs , ):
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config( cls , config ):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"""WarmUp""": WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )

    def _prepare_local( self , var_device , var_dtype , apply_state ):
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]["""weight_decay_rate"""] = tf.constant(
            self.weight_decay_rate , name="""adam_weight_decay_rate""" )

    def _decay_weights_op( self , var , learning_rate , apply_state ):
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
        return tf.no_op()

    def apply_gradients( self , grads_and_vars , name=None , **kwargs ):
        grads, tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )

    def _get_lr( self , var_device , var_dtype , apply_state ):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense( self , grad , var , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )

    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ):
        lr_t, kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )

    def get_config( self ):
        config = super().get_config()
        config.update({"""weight_decay_rate""": self.weight_decay_rate} )
        return config

    def _do_use_weight_decay( self , param_name ):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several micro-batches so they can be applied in one optimizer step."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
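# Hedged end-to-end sketch tying the pieces above together: a toy dense model
# and random tensors (all illustrative, not part of the original file) are
# trained for one accumulated step; the sums are averaged over the number of
# micro-batches before being applied.
if __name__ == "__main__":
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    optimizer, lr_schedule = create_optimizer(init_lr=5e-5, num_train_steps=100, num_warmup_steps=10)
    accumulator = GradientAccumulator()
    x, y = tf.random.normal((8, 4)), tf.random.normal((8, 1))
    for micro_x, micro_y in zip(tf.split(x, 4), tf.split(y, 4)):
        with tf.GradientTape() as tape:
            loss = tf.reduce_mean((model(micro_x) - micro_y) ** 2)
        accumulator(tape.gradient(loss, model.trainable_variables))
    grads = [g / tf.cast(accumulator.step, g.dtype) for g in accumulator.gradients]
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    accumulator.reset()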
| 165 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 165 | 1 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    """Adjacency-list graph supporting both directed and undirected edges."""

    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
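# A short usage sketch of the class above (vertex values and the printed
# layout are illustrative only):
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(0, 1).add_edge(1, 2)  # add_edge returns self, so calls chain
    print(graph)  # -> {0: [1], 1: [0, 2], 2: [1]}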
| 356 | from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds where `function` crosses zero inside [a, b] by repeated halving."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precision reaches 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
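# Worked sign check for the call above (numbers follow directly from f):
# f(1) = 1 - 2 - 5 = -6 < 0 while f(1000) = 10**9 - 2005 > 0, so a root is
# bracketed in [1, 1000]; bisection then halves the interval until
# |start - mid| <= 1e-7, converging near 2.0946.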
| 356 | 1 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
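# The batching logic in `_generate_tables` maps directly onto pyarrow's API; a
# standalone sketch for reference (the file name is a hypothetical stand-in):
#
#     parquet_file = pq.ParquetFile("train.parquet")
#     for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
#         pa_table = pa.Table.from_batches([record_batch])  # one Arrow table per yielded chunk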
| 4 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCamelCase : Optional[int] = pytest.mark.integration
@require_faiss
class a ( a__ ):
    def _create_dummy_dataset(self):
        """simple docstring"""
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
lowerCAmelCase = dset.map(
lambda _snake_case , _snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_snake_case , keep_in_memory=_snake_case )
lowerCAmelCase = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCAmelCase = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCAmelCase = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_snake_case )
lowerCAmelCase ,lowerCAmelCase = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertRaises(_snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
self.assertRaises(_snake_case , index.search_batch , queries[0] )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_snake_case ):
lowerCAmelCase = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = faiss.IndexFlat(5 )
lowerCAmelCase = FaissIndex(custom_index=_snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_snake_case ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ):
import faiss
lowerCAmelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCAmelCase = 'index.faiss'
lowerCAmelCase = F'mock://{index_name}'
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCAmelCase = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase = 1
lowerCAmelCase ,lowerCAmelCase = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class a ( a__ ):
def UpperCamelCase__ ( self ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCAmelCase = Elasticsearch()
lowerCAmelCase = {'acknowledged': True}
lowerCAmelCase = ElasticSearchIndex(es_client=_snake_case )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCAmelCase = 'foo'
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCAmelCase ,lowerCAmelCase = index.search(_snake_case , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
# batched queries with timeout
lowerCAmelCase = ['foo', 'bar', 'foobar']
lowerCAmelCase = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCAmelCase ,lowerCAmelCase = index.search_batch(_snake_case , request_timeout=30 )
lowerCAmelCase = [scores[0] for scores in total_scores]
lowerCAmelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , _snake_case )
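# Why the FAISS queries above retrieve the last row: the dummy dataset indexes
# vector i * ones(5) for i in 0..29, so the inner-product score of query
# ones(5) against row i is 5 * i, maximized at i == 29 (hence
# "my_name-train_29"). A minimal standalone check (hedged, outside the test
# harness):
#
#     index = faiss.IndexFlatIP(5)
#     index.add(np.arange(30, dtype="float32").reshape(-1, 1) * np.ones((30, 5), dtype="float32"))
#     scores, ids = index.search(np.ones((1, 5), dtype="float32"), 1)
#     assert ids[0][0] == 29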
| 4 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
__A = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
for attribute in key.split('.' ):
lowerCAmelCase__ :Optional[Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
lowerCAmelCase__ :Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
lowerCAmelCase__ :str = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
lowerCAmelCase__ :Dict = value
elif weight_type == "weight_g":
lowerCAmelCase__ :Tuple = value
elif weight_type == "weight_v":
lowerCAmelCase__ :int = value
elif weight_type == "bias":
lowerCAmelCase__ :List[str] = value
else:
lowerCAmelCase__ :str = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :int = []
lowerCAmelCase__ :Any = fairseq_model.state_dict()
lowerCAmelCase__ :Optional[int] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowerCAmelCase__ :Optional[Any] = None
for name, value in fairseq_dict.items():
lowerCAmelCase__ :List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
lowerCAmelCase__ :Dict = True
elif name.split('.' )[0] == "proj":
lowerCAmelCase__ :Optional[int] = fairseq_model.proj
lowerCAmelCase__ :Dict = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCAmelCase__ :Dict = True
if "*" in mapped_key:
lowerCAmelCase__ :Tuple = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
lowerCAmelCase__ :Tuple = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowerCAmelCase__ :int = 'weight_g'
elif "weight_v" in name:
lowerCAmelCase__ :str = 'weight_v'
elif "bias" in name:
lowerCAmelCase__ :Tuple = 'bias'
elif "weight" in name:
lowerCAmelCase__ :Optional[Any] = 'weight'
else:
lowerCAmelCase__ :Optional[Any] = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F"Unused weights: {unused_weights}" )
return proj_weight
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ :int = full_name.split('conv_layers.' )[-1]
lowerCAmelCase__ :Union[str, Any] = name.split('.' )
lowerCAmelCase__ :List[Any] = int(items[0] )
lowerCAmelCase__ :int = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
lowerCAmelCase__ :Union[str, Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
lowerCAmelCase__ :List[Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
lowerCAmelCase__ :Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
lowerCAmelCase__ :Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
def make_linear_from_emb(emb):
    """Builds a bias-free nn.Linear that shares its weight with the given embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    """Turns a fairseq dict file ("<token> <count>" per line) into a token -> id mapping."""
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
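# Worked example for create_vocab_dict (hypothetical dict file contents): a
# fairseq dict whose lines are "hello 42" and "world 7" yields
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5} --
# the frequency counts are discarded and ids start after the special tokens.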
@torch.no_grad()
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->str:
"""simple docstring"""
lowerCAmelCase__ :Any = WavaVecaConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = SpeechaTextaConfig.from_pretrained(
_SCREAMING_SNAKE_CASE , vocab_size=_SCREAMING_SNAKE_CASE , decoder_layers=_SCREAMING_SNAKE_CASE , do_stable_layer_norm=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowerCAmelCase__ :Union[str, Any] = model[0].eval()
# set weights for wav2vec2 encoder
lowerCAmelCase__ :Optional[Any] = WavaVecaModel(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Union[str, Any] = recursively_load_weights_wavaveca(model.encoder , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Any = SpeechaTextaForCausalLM(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ , lowerCAmelCase__ :Dict = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=_SCREAMING_SNAKE_CASE )
# set output linear layer
unexpected_keys.remove('embed_out' )
lowerCAmelCase__ :Union[str, Any] = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
lowerCAmelCase__ :List[Any] = SpeechEncoderDecoderModel(encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = False
# add projection layer
lowerCAmelCase__ :Optional[Any] = nn.Parameter(projection_layer.weight )
lowerCAmelCase__ :List[str] = nn.Parameter(projection_layer.bias )
lowerCAmelCase__ :Dict = create_vocab_dict(_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[int] = SpeechaTextaTokenizer(os.path.join(_SCREAMING_SNAKE_CASE , 'vocab.json' ) )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = hf_wavavec.config.to_dict()
lowerCAmelCase__ :List[str] = tokenizer.pad_token_id
lowerCAmelCase__ :Optional[Any] = tokenizer.bos_token_id
lowerCAmelCase__ :Any = tokenizer.eos_token_id
lowerCAmelCase__ :Dict = 'speech_to_text_2'
lowerCAmelCase__ :Optional[Any] = 'wav2vec2'
lowerCAmelCase__ :Union[str, Any] = SpeechEncoderDecoderConfig.from_dict(_SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
__A = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 560 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector = v . v"""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier trained by solving Wolfe's dual problem with scipy."""

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations, classes) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #   and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
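# Hedged end-to-end sketch of the classifier above on a 1-D linearly separable
# toy set (data and expected outputs are illustrative, not from this file):
#
#     svc = SVC(kernel="linear")
#     xs = [np.array([-2.0]), np.array([-1.0]), np.array([1.0]), np.array([2.0])]
#     ys = np.array([-1, -1, 1, 1])
#     svc.fit(xs, ys)
#     svc.predict(np.array([1.5]))   # expected: 1
#     svc.predict(np.array([-1.5]))  # expected: -1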
| 560 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__: List[str] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
__magic_name__: List[Any] = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
__magic_name__: Union[str, Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
__magic_name__: Optional[int] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6_0_0_0,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
__magic_name__: int = tempfile.mkdtemp()
__magic_name__: Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__magic_name__: Tuple = os.path.join(self.tmpdirname , __snake_case )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
# load decoder from hub
__magic_name__: Dict = """hf-internal-testing/ngram-beam-search-decoder"""
def lowerCamelCase__ ( self : Any , **__snake_case : str ) -> Optional[int]:
__magic_name__: Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(__snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : str , **__snake_case : int ) -> Dict:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCamelCase__ ( self : int , **__snake_case : List[str] ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__: Dict = self.get_tokenizer()
__magic_name__: Any = self.get_feature_extractor()
__magic_name__: Tuple = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
processor.save_pretrained(self.tmpdirname )
__magic_name__: Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Tuple:
__magic_name__: Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__magic_name__: int = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__magic_name__: Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__snake_case , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: List[Any] = self.get_decoder()
__magic_name__: int = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Tuple = floats_list((3, 1_0_0_0) )
__magic_name__: List[str] = feature_extractor(__snake_case , return_tensors="""np""" )
__magic_name__: Tuple = processor(__snake_case , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
__magic_name__: Tuple = self.get_feature_extractor()
__magic_name__: List[str] = self.get_tokenizer()
__magic_name__: str = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = """This is a test string"""
__magic_name__: List[str] = processor(text=__snake_case )
__magic_name__: Tuple = tokenizer(__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase__ ( self : int , __snake_case : List[str]=(2, 1_0, 1_6) , __snake_case : List[Any]=7_7 ) -> Dict:
np.random.seed(__snake_case )
return np.random.rand(*__snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Tuple = self.get_tokenizer()
__magic_name__: Any = self.get_decoder()
__magic_name__: Tuple = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: List[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__magic_name__: str = processor.decode(__snake_case )
__magic_name__: Optional[int] = decoder.decode_beams(__snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def lowerCamelCase__ ( self : int , __snake_case : Dict ) -> Any:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: List[Any] = self.get_tokenizer()
__magic_name__: int = self.get_decoder()
__magic_name__: Optional[Any] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__magic_name__: Optional[int] = processor.batch_decode(__snake_case )
else:
with get_context(__snake_case ).Pool() as pool:
__magic_name__: Any = processor.batch_decode(__snake_case , __snake_case )
__magic_name__: Dict = list(__snake_case )
with get_context("""fork""" ).Pool() as p:
__magic_name__: List[str] = decoder.decode_beams_batch(__snake_case , __snake_case )
__magic_name__, __magic_name__, __magic_name__: Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__snake_case , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__snake_case , decoded_processor.logit_score )
self.assertListEqual(__snake_case , decoded_processor.lm_score )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__magic_name__: List[str] = self.get_feature_extractor()
__magic_name__: Optional[Any] = self.get_tokenizer()
__magic_name__: Optional[int] = self.get_decoder()
__magic_name__: Dict = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: str = self._get_dummy_logits()
__magic_name__: Dict = 1_5
__magic_name__: int = -20.0
__magic_name__: int = -4.0
__magic_name__: Dict = processor.batch_decode(
__snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Optional[int] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , beam_width=__snake_case , beam_prune_logp=__snake_case , token_min_logp=__snake_case , )
__magic_name__: Any = [d[0][0] for d in decoded_decoder_out]
__magic_name__: Optional[int] = [d[0][2] for d in decoded_decoder_out]
__magic_name__: Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __snake_case )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(__snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __snake_case , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__: int = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Union[str, Any] = self.get_decoder()
__magic_name__: str = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
__magic_name__: Any = self._get_dummy_logits()
__magic_name__: Union[str, Any] = 2.0
__magic_name__: Optional[Any] = 5.0
__magic_name__: Optional[Any] = -20.0
__magic_name__: List[str] = True
__magic_name__: List[Any] = processor.batch_decode(
__snake_case , alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
__magic_name__: Union[str, Any] = decoded_processor_out.text
__magic_name__: Union[str, Any] = list(__snake_case )
decoder.reset_params(
alpha=__snake_case , beta=__snake_case , unk_score_offset=__snake_case , lm_score_boundary=__snake_case , )
with get_context("""fork""" ).Pool() as pool:
__magic_name__: str = decoder.decode_beams_batch(
__snake_case , __snake_case , )
__magic_name__: List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __snake_case )
__magic_name__: List[str] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: Union[str, Any] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: Optional[int] = os.listdir(__snake_case )
__magic_name__: Union[str, Any] = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
        # test that only the decoder-relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Any ) -> Any:
__magic_name__: int = snapshot_download("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained(__snake_case )
__magic_name__: Any = processor.decoder.model_container[processor.decoder._model_key]
__magic_name__: int = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
__magic_name__: str = os.listdir(__snake_case )
__magic_name__: Tuple = os.listdir(__snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
__magic_name__: List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
__magic_name__: List[str] = floats_list((3, 1_0_0_0) )
__magic_name__: Tuple = processor_wavaveca(__snake_case , return_tensors="""np""" )
__magic_name__: Optional[Any] = processor_auto(__snake_case , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__magic_name__: int = self._get_dummy_logits()
__magic_name__: List[Any] = processor_wavaveca.batch_decode(__snake_case )
__magic_name__: Union[str, Any] = processor_auto.batch_decode(__snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
__magic_name__: Optional[int] = self.get_feature_extractor()
__magic_name__: Any = self.get_tokenizer()
__magic_name__: Dict = self.get_decoder()
__magic_name__: List[str] = WavaVecaProcessorWithLM(tokenizer=__snake_case , feature_extractor=__snake_case , decoder=__snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def lowerCamelCase__ ( __snake_case : Optional[int] , __snake_case : int ) -> int:
__magic_name__: Any = [d[key] for d in offsets]
return retrieved_list
    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))
        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))
        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)
        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values
        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()
        output = processor.decode(logits[0], output_word_offsets=True)
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]
        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))
        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on
        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 96 |
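The word offsets above are expressed in logit frames; the slow test converts them to seconds via `inputs_to_logits_ratio / sampling_rate`. A minimal, dependency-free sketch of that conversion, with illustrative numbers (the helper name and example offsets are not from the original):

def offsets_to_timestamps(word_offsets, inputs_to_logits_ratio, sampling_rate=16_000):
    # Each logit frame covers inputs_to_logits_ratio input samples.
    time_per_frame = inputs_to_logits_ratio / sampling_rate
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_per_frame,
            "end_time": d["end_offset"] * time_per_frame,
        }
        for d in word_offsets
    ]


# Example: with ratio 320 and 16 kHz audio, one frame is 20 ms.
print(offsets_to_timestamps([{"word": "hello", "start_offset": 10, "end_offset": 25}], 320))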
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class TapasConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 514 | 0 |
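The integer-key normalization at the end of `__init__` matters because JSON serializes dict keys as strings; a small self-contained sketch of the round-trip (plain Python, no transformers dependency, values illustrative):

import json

aggregation_labels = {0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}
restored = json.loads(json.dumps(aggregation_labels))
assert list(restored) == ["0", "1", "2", "3"]  # keys became strings on the round-trip
restored = {int(k): v for k, v in restored.items()}  # the fix applied in __init__
assert restored == aggregation_labels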
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 714 |
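As a sanity check, the recursive generator above should agree with the standard library over 1..n; a minimal sketch (function name is illustrative):

from itertools import combinations


def generate_all_combinations_itertools(n: int, k: int) -> list[list[int]]:
    # Same output order as the recursive version: k-combinations of 1..n.
    return [list(c) for c in combinations(range(1, n + 1), k)]


assert generate_all_combinations_itertools(4, 2) == [
    [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]
]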
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 211 | 0 |
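A stripped-down illustration of the lazy-import pattern above, without the transformers `_LazyModule` helper (class and attribute names are made up; it assumes the module is part of an importable package):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes to their submodules only on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module("." + module_name, self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value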
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.vocab)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        '''simple docstring'''
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 160 |
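The tokenizer above is purely character-level; a dependency-free sketch of the same encode/decode round-trip over a toy vocab (the vocab contents are illustrative, not the real MGP-STR vocabulary):

vocab = {"[GO]": 0, "h": 1, "e": 2, "l": 3, "o": 4}
decoder = {v: k for k, v in vocab.items()}


def encode(text):
    # Unknown characters fall back to the "[GO]" token, as in the class above.
    return [vocab.get(ch, vocab["[GO]"]) for ch in text]


def decode(ids):
    return "".join(decoder[i] for i in ids)


assert decode(encode("hello")) == "hello"
assert encode("hz") == [1, 0]  # "z" is out of vocab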
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 160 | 1 |
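A quick self-contained sketch of what the renaming step above does to a couple of representative ParlAI-style keys; the rule table here is a deliberately reduced re-implementation for illustration, not the full PATTERNS list:

# Minimal re-implementation of the renaming rules, for illustration only.
MINI_PATTERNS = [("attention", "attn"), ("q_lin", "q_proj")]


def rename_key(k: str) -> str:
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in MINI_PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
    return k


assert rename_key("embeddings.weight") == "shared.weight"
assert rename_key("encoder.attention.q_lin.weight") == "encoder.self_attn.q_proj.weight"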
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
def _UpperCAmelCase ( self ):
super().setUp()
UpperCamelCase_ : Dict = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCAmelCase ( self ):
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
UpperCamelCase_ : int = self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = 10_24
return tokenizer
@require_torch
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[Any] = self.canine_tokenizer
UpperCamelCase_ : Optional[Any] = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
UpperCamelCase_ : Dict = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
UpperCamelCase_ : str = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""" )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase_ : List[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Tuple = self.canine_tokenizer
UpperCamelCase_ : List[str] = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
UpperCamelCase_ : Tuple = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , __lowerCAmelCase )
self.assertIn("""attention_mask""" , __lowerCAmelCase )
self.assertIn("""token_type_ids""" , __lowerCAmelCase )
@require_torch
def _UpperCAmelCase ( self ):
UpperCamelCase_ : List[Any] = self.canine_tokenizer
UpperCamelCase_ : Optional[int] = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
UpperCamelCase_ : Optional[int] = tokenizer(
text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , truncation=__lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def _UpperCAmelCase ( self ):
# safety check on max_len default value so we are sure the test works
UpperCamelCase_ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCamelCase_ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase_ : str = tempfile.mkdtemp()
UpperCamelCase_ : Dict = """ He is very happy, UNwant\u00E9d,running"""
UpperCamelCase_ : Optional[int] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
UpperCamelCase_ : List[str] = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
UpperCamelCase_ : List[Any] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
shutil.rmtree(__lowerCAmelCase )
UpperCamelCase_ : List[str] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCamelCase_ : Dict = tempfile.mkdtemp()
UpperCamelCase_ : List[Any] = """ He is very happy, UNwant\u00E9d,running"""
UpperCamelCase_ : Optional[Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCamelCase_ : List[Any] = chr(0Xe_007 )
additional_special_tokens.append(__lowerCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
UpperCamelCase_ : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
UpperCamelCase_ : List[str] = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
UpperCamelCase_ : List[str] = after_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertIn(__lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCamelCase_ : str = tokenizer.__class__.from_pretrained(__lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Tuple = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCamelCase_ , UpperCamelCase_ : Union[str, Any] = self.get_clean_sequence(__lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCamelCase_ : Union[str, Any] = 0Xe_005
UpperCamelCase_ : List[Any] = chr(__lowerCAmelCase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
UpperCamelCase_ : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
UpperCamelCase_ : Optional[int] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=__lowerCAmelCase )
UpperCamelCase_ : List[str] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
UpperCamelCase_ : Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
UpperCamelCase_ : Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , input_encoded + special_token_id )
UpperCamelCase_ : Any = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCamelCase_ : Union[str, Any] = chr(0Xe_005 )
UpperCamelCase_ : Union[str, Any] = chr(0Xe_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=__lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
UpperCamelCase_ : Tuple = tokenizer.tokenize(__lowerCAmelCase )
UpperCamelCase_ : int = tokenizer.tokenize(__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
self.assertEqual(len(__lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , __lowerCAmelCase )
self.assertEqual(token_a[0] , __lowerCAmelCase )
@require_tokenizers
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
UpperCamelCase_ : Union[str, Any] = 0Xe_006
UpperCamelCase_ : List[str] = chr(__lowerCAmelCase )
UpperCamelCase_ : Dict = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCAmelCase )
tokenizer.from_pretrained(__lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
UpperCamelCase_ : str = json.load(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
UpperCamelCase_ : Any = json.load(__lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCamelCase_ : Any = 0Xe_006
UpperCamelCase_ : str = chr(__lowerCAmelCase )
UpperCamelCase_ : Tuple = [new_token_a]
UpperCamelCase_ : int = [new_token_a]
with open(os.path.join(__lowerCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCamelCase_ : Tuple = tokenizer_class.from_pretrained(__lowerCAmelCase , extra_ids=0 )
self.assertIn(__lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCamelCase_ : Tuple = 0Xe_007
UpperCamelCase_ : List[Any] = chr(__lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCamelCase_ : Optional[Any] = [AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase )]
UpperCamelCase_ : Optional[int] = tokenizer_class.from_pretrained(
__lowerCAmelCase , additional_special_tokens=__lowerCAmelCase , extra_ids=0 )
self.assertIn(__lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[int] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCamelCase_ : Tuple = """hello world"""
if self.space_between_special_tokens:
UpperCamelCase_ : Any = """[CLS] hello world [SEP]"""
else:
UpperCamelCase_ : List[Any] = input
UpperCamelCase_ : Dict = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
UpperCamelCase_ : Optional[Any] = tokenizer.decode(__lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCAmelCase , [output, output.lower()] )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
UpperCamelCase_ : Union[str, Any] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
UpperCamelCase_ : str = """a"""
UpperCamelCase_ : Tuple = ord(__lowerCAmelCase )
for attr in attributes_list:
setattr(__lowerCAmelCase , attr + """_id""" , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , attr + """_id""" ) , __lowerCAmelCase )
setattr(__lowerCAmelCase , attr + """_id""" , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase , attr + """_id""" ) , __lowerCAmelCase )
setattr(__lowerCAmelCase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens_ids""" ) , [] )
UpperCamelCase_ : Union[str, Any] = 0Xe_006
UpperCamelCase_ : Union[str, Any] = chr(__lowerCAmelCase )
setattr(__lowerCAmelCase , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(__lowerCAmelCase , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
pass
| 543 |
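CANINE operates directly on Unicode code points, which is why the tests above build special tokens with `chr(0xE005)` and friends; a minimal sketch of that encoding style with no transformers dependency (the specific code points chosen here are illustrative):

# Private Use Area code points (0xE000-0xF8FF) are free to use as special tokens.
CLS, SEP = 0xE000, 0xE001  # illustrative choices


def canine_style_encode(text):
    return [CLS] + [ord(ch) for ch in text] + [SEP]


ids = canine_style_encode("hi")
assert ids == [0xE000, 104, 105, 0xE001]
assert "".join(chr(i) for i in ids[1:-1]) == "hi"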
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # The handler's non-default kwargs should reach the grad scaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 543 | 1 |
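The `to_kwargs` behaviour exercised above, returning only fields that differ from the dataclass defaults, is easy to replicate; a sketch under the assumption that handlers are plain dataclasses without `default_factory` fields (names illustrative):

from dataclasses import dataclass, fields


@dataclass
class Handler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        # Keep only fields whose value differs from the declared default.
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != f.default
        }


assert Handler().to_kwargs() == {}
assert Handler(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25}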
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
for i, value in enumerate(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""word_shape_file"""] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file ,"""w""" ,encoding="""utf-8""" ) as word_shape_writer:
json.dump(lowerCamelCase ,lowerCamelCase ,ensure_ascii=lowerCamelCase )
with open(self.word_pronunciation_file ,"""w""" ,encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(lowerCamelCase ,lowerCamelCase ,ensure_ascii=lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(lowerCamelCase ,["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(lowerCamelCase ) ,[5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase ) ,[5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) ,["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase ,strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""h\u00E9llo"""] )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase ,strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) ,["""hello"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase ,strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase ,strip_accents=lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) ,["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCamelCase ,never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) ,["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=lowerCamelCase ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) ,["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) ,["""[UNK]""", """runn""", """##ing"""] )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCamelCase ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCamelCase ) for t in ["""Test""", """\xad""", """test"""]] ,[["""[UNK]"""], [], ["""[UNK]"""]] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
__SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
lowerCamelCase ,return_attention_mask=lowerCamelCase ,return_token_type_ids=lowerCamelCase ,return_offsets_mapping=lowerCamelCase ,add_special_tokens=lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(lowerCamelCase ,"""do_lower_case""" ) else False
__SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens["""offset_mapping"""] )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
__SCREAMING_SNAKE_CASE = """""".join(lowerCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
__SCREAMING_SNAKE_CASE = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCamelCase )
]
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file ,self.word_shape_file ,self.word_pronunciation_file )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""你好""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""你是谁""" ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCamelCase ,lowerCamelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__SCREAMING_SNAKE_CASE = """你好,你是谁"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model(
lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,add_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.encode_plus(lowerCamelCase ,add_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
| 109 |
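RoCBERT keeps three parallel id sequences per token (semantic, shape, and pronunciation), as the tests above exercise; a toy sketch of that triple lookup with made-up tables (the real ones are loaded from three JSON files):

# Illustrative lookup tables only.
vocab = {"你": 5, "好": 6, "[UNK]": 0}
shape_ids = {"你": 50, "好": 60, "[UNK]": 0}
pron_ids = {"你": 500, "好": 600, "[UNK]": 0}


def triple_encode(tokens):
    return (
        [vocab.get(t, vocab["[UNK]"]) for t in tokens],
        [shape_ids.get(t, shape_ids["[UNK]"]) for t in tokens],
        [pron_ids.get(t, pron_ids["[UNK]"]) for t in tokens],
    )


assert triple_encode(["你", "好"]) == ([5, 6], [50, 60], [500, 600])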
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
a = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 48000,
"sample_size": 65536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 48000,
"sample_size": 65536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 48000,
"sample_size": 131072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 16000,
"sample_size": 65536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    '''simple docstring'''
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    '''simple docstring'''
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        '''simple docstring'''
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    '''simple docstring'''
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
a = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
a = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
a = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
a = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
a = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
a = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(__UpperCAmelCase ) and not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return name.replace(__UpperCAmelCase , __UpperCAmelCase )
elif name.startswith(__UpperCAmelCase ):
return [name.replace(__UpperCAmelCase , __UpperCAmelCase ) for v in value]
raise ValueError(f"""Attn error with {name}""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=13 ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
__SCREAMING_SNAKE_CASE = 0
if string.startswith("""net.3.""" ):
depth += 1
__SCREAMING_SNAKE_CASE = string[6:]
elif string.startswith("""net.""" ):
__SCREAMING_SNAKE_CASE = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
__SCREAMING_SNAKE_CASE = string[7:]
if string.startswith("""main.""" ):
__SCREAMING_SNAKE_CASE = string[5:]
# mid block
if string[:2].isdigit():
__SCREAMING_SNAKE_CASE = string[:2]
__SCREAMING_SNAKE_CASE = string[2:]
else:
__SCREAMING_SNAKE_CASE = string[0]
__SCREAMING_SNAKE_CASE = string[1:]
if depth == max_depth:
__SCREAMING_SNAKE_CASE = MID_NUM_TO_LAYER[layer_num]
__SCREAMING_SNAKE_CASE = """mid_block"""
elif depth > 0 and int(__UpperCAmelCase ) < 7:
__SCREAMING_SNAKE_CASE = DOWN_NUM_TO_LAYER[layer_num]
__SCREAMING_SNAKE_CASE = f"""down_blocks.{depth}"""
elif depth > 0 and int(__UpperCAmelCase ) > 7:
__SCREAMING_SNAKE_CASE = UP_NUM_TO_LAYER[layer_num]
__SCREAMING_SNAKE_CASE = f"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
__SCREAMING_SNAKE_CASE = DEPTH_0_TO_LAYER[layer_num]
__SCREAMING_SNAKE_CASE = f"""up_blocks.{max_depth - 1}""" if int(__UpperCAmelCase ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
__SCREAMING_SNAKE_CASE = string_left[1:]
if "resnets" in new_layer:
__SCREAMING_SNAKE_CASE = convert_resconv_naming(__UpperCAmelCase )
elif "attentions" in new_layer:
__SCREAMING_SNAKE_CASE = convert_attn_naming(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = new_string_left
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = prefix + """.""" + new_layer + """.""" + string_left
else:
__SCREAMING_SNAKE_CASE = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
__SCREAMING_SNAKE_CASE = rename(__UpperCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = transform_conv_attns(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = v
return new_state_dict
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if len(__UpperCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
__SCREAMING_SNAKE_CASE = v[:, :, 0]
else:
# bias
__SCREAMING_SNAKE_CASE = v
else:
# qkv matrices
__SCREAMING_SNAKE_CASE = v.shape[0]
__SCREAMING_SNAKE_CASE = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
__SCREAMING_SNAKE_CASE = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
__SCREAMING_SNAKE_CASE = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
__SCREAMING_SNAKE_CASE = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
__SCREAMING_SNAKE_CASE = download(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = MODELS_MAP[model_name]["""sample_rate"""]
__SCREAMING_SNAKE_CASE = MODELS_MAP[model_name]["""sample_size"""]
__SCREAMING_SNAKE_CASE = Object()
__SCREAMING_SNAKE_CASE = sample_size
__SCREAMING_SNAKE_CASE = sample_rate
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = UNetaDModel(sample_size=__UpperCAmelCase , sample_rate=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = diffusers_model.state_dict()
__SCREAMING_SNAKE_CASE = DiffusionUncond(__UpperCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=__UpperCAmelCase )["""state_dict"""] )
__SCREAMING_SNAKE_CASE = orig_model.diffusion_ema.eval()
__SCREAMING_SNAKE_CASE = orig_model.state_dict()
__SCREAMING_SNAKE_CASE = rename_orig_weights(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
__SCREAMING_SNAKE_CASE = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(__UpperCAmelCase ) == 0, f"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith("""kernel""" ) for k in list(__UpperCAmelCase ) ), f"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
__SCREAMING_SNAKE_CASE = value.squeeze()
__SCREAMING_SNAKE_CASE = value
diffusers_model.load_state_dict(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = 100
__SCREAMING_SNAKE_CASE = 33
__SCREAMING_SNAKE_CASE = IPNDMScheduler(num_train_timesteps=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = torch.manual_seed(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = torch.randn([1, 2, config.sample_size] , generator=__UpperCAmelCase ).to(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = torch.linspace(1 , 0 , steps + 1 , device=__UpperCAmelCase )[:-1]
__SCREAMING_SNAKE_CASE = get_crash_schedule(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = DanceDiffusionPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = torch.manual_seed(33 )
__SCREAMING_SNAKE_CASE = pipe(num_inference_steps=__UpperCAmelCase , generator=__UpperCAmelCase ).audios
__SCREAMING_SNAKE_CASE = sampling.iplms_sample(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {} )
__SCREAMING_SNAKE_CASE = generated.clamp(-1 , 1 )
__SCREAMING_SNAKE_CASE = (generated - audio).abs().sum()
__SCREAMING_SNAKE_CASE = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , __UpperCAmelCase )
print("""Diff max""" , __UpperCAmelCase )
assert diff_max < 1e-3, f"""Diff max: {diff_max} is too much :-/"""
print(f"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
a = parser.parse_args()
main(args)
| 109 | 1 |
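The `transform_conv_attns` step above splits a fused qkv conv kernel into three equal chunks and drops the trailing conv dimension; a standalone sketch with random weights (dimensions illustrative):

import torch


def split_qkv(weight):
    # weight: (3 * dim, dim, 1) fused conv1d qkv kernel -> three (dim, dim) linear weights
    assert weight.shape[0] % 3 == 0
    single = weight.shape[0] // 3
    return [weight[i * single : (i + 1) * single, :, 0] for i in range(3)]


w = torch.randn(3 * 8, 8, 1)
q, k, v = split_qkv(w)
assert q.shape == k.shape == v.shape == (8, 8)
assert torch.equal(torch.cat([q, k, v]), w[:, :, 0])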
'''simple docstring'''
def A_ ( SCREAMING_SNAKE_CASE_ = "The quick brown fox jumps over the lazy dog" , ) ->bool:
lowercase_ = set()
# Replace all the whitespace in our sentence
lowercase_ = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 26
def A_ ( SCREAMING_SNAKE_CASE_ = "The quick brown fox jumps over the lazy dog" , ) ->bool:
lowercase_ = [False] * 26
for char in input_str:
if char.islower():
lowercase_ = True
elif char.isupper():
lowercase_ = True
return all(SCREAMING_SNAKE_CASE_ )
def A_ ( SCREAMING_SNAKE_CASE_ = "The quick brown fox jumps over the lazy dog" , ) ->bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def A_ ( ) ->None:
from timeit import timeit
lowercase_ = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit("""is_pangram_faster()""" , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit("""is_pangram_fastest()""" , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
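For completeness, the same check can be done with a 26-bit mask, which avoids allocating a set or list; a small sketch (function name is illustrative):

def is_pangram_bitmask(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    mask = 0
    for ch in input_str.lower():
        if "a" <= ch <= "z":
            mask |= 1 << (ord(ch) - ord("a"))
    return mask == (1 << 26) - 1


assert is_pangram_bitmask()
assert not is_pangram_bitmask("hello world")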
| 603 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**lowercase_ )
lowercase_ = size if size is not None else {"""shortest_edge""": 384}
lowercase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowercase_ = do_resize
lowercase_ = size
# Default value set here for backwards compatibility where the value in config is None
lowercase_ = crop_pct if crop_pct is not None else 224 / 256
lowercase_ = resample
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_normalize
lowercase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : float , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ):
'''simple docstring'''
lowercase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" not in size:
raise ValueError(F"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowercase_ = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowercase_ = int(shortest_edge / crop_pct )
lowercase_ = get_resize_output_image_size(lowercase_ , size=lowercase_ , default_to_square=lowercase_ )
lowercase_ = resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=lowercase_ , size=(shortest_edge, shortest_edge) , data_format=lowercase_ , **lowercase_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
lowercase_ , size=(shortest_edge, shortest_edge) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ):
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Tuple , ):
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : float = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : List[str] , ):
'''simple docstring'''
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = crop_pct if crop_pct is not None else self.crop_pct
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = image_mean if image_mean is not None else self.image_mean
lowercase_ = image_std if image_std is not None else self.image_std
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowercase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowercase_ = [self.resize(image=lowercase_ , size=lowercase_ , crop_pct=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowercase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowercase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
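# Minimal usage sketch for the processor above. The random input image is an
# illustrative assumption, not taken from this file.
if __name__ == "__main__":
    _demo_processor = ConvNextImageProcessor(size={"shortest_edge": 224})
    _demo_image = (np.random.rand(256, 320, 3) * 255).astype("uint8")
    _demo_inputs = _demo_processor(images=_demo_image, return_tensors="np")
    # shortest edge resized to 224 / crop_pct, then center-cropped to 224x224
    print(_demo_inputs["pixel_values"].shape)  # (1, 3, 224, 224)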
| 603 | 1 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the (k, o, q, v) attention kernels of a layer, reshaped to 2D. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP kernels of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm scale of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts T5X-flax checkpoint parameters into an OrderedDict keyed by PyTorch parameter names."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings. The PyTorch key names below follow the standard T5/UMT5 state dict layout.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder").T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder").T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder").T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(old, i, "decoder").T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the model's parameters with the converted T5X ones."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False):
    """Loads the config, builds the model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint.
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
__a : str = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
__a : List[str] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
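# Example invocation for the script above; all paths are illustrative assumptions:
# python convert_umt5_checkpoint.py --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json --pytorch_dump_path /path/to/output --scalable_attention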
) | 397 |
import os
import pytest
from attr import dataclass
DEFAULT_REGION = "us-east-1"  # default region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self):
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
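# Usage sketch: how a test class would consume the fixture above. The class body is an
# illustrative assumption, not taken from this file.
# @pytest.mark.usefixtures("sm_env")
# class TestFrameworkEnvironment:
#     framework = "pytorch"
#     def test_base_job_name(self):
#         assert self.env.base_job_name == "pytorch-transformers-test"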
| 36 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
_SCREAMING_SNAKE_CASE : List[str] = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
_SCREAMING_SNAKE_CASE : int = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
_SCREAMING_SNAKE_CASE : str = '' if has_file(args.repo_path, """config.json""") else 'unet'
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
_SCREAMING_SNAKE_CASE : Tuple = reader.read()
_SCREAMING_SNAKE_CASE : Dict = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
_SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDModel(**config)
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
_SCREAMING_SNAKE_CASE : Any = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_SCREAMING_SNAKE_CASE : List[str] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_SCREAMING_SNAKE_CASE : int = config[key]
del config[key]
_SCREAMING_SNAKE_CASE : int = [k.replace("""UNetRes""", """""") for k in config['down_block_types']]
_SCREAMING_SNAKE_CASE : Tuple = [k.replace("""UNetRes""", """""") for k in config['up_block_types']]
if do_only_weights:
_SCREAMING_SNAKE_CASE : Tuple = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
_SCREAMING_SNAKE_CASE : int = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
_SCREAMING_SNAKE_CASE : Any = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
_SCREAMING_SNAKE_CASE : Tuple = param_value
_SCREAMING_SNAKE_CASE : str = True
if not has_changed:
_SCREAMING_SNAKE_CASE : int = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
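# Example invocation (the repo path is an illustrative assumption):
# python change_naming_configs_and_checkpoints.py --repo_path /path/to/unet/repo --dump_path /path/to/out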
| 702 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)
    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False
    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10
    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])
    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }
    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,
        ).input_ids)
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids)
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")
    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            # shift the answer span into the coordinates of this stride
            strided_start = start_token - i + q_len
            strided_end = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            strided_start = -100
            strided_end = -100
            answers_category.append("null")
        new = inputs[-1][strided_start : strided_end + 1]
        answers_start_token.append(strided_start)
        answers_end_token.append(strided_end)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    })
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]
    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)
    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 518 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'andreasmadsen/efficient_mlm_m0.40': (
'https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
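# Usage sketch for the config above (values are illustrative):
if __name__ == "__main__":
    _demo_config = RobertaPreLayerNormConfig(num_hidden_layers=6)
    print(_demo_config.model_type, _demo_config.num_hidden_layers)  # roberta-prelayernorm 6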
| 479 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Checks whether digit n may be placed at grid[row][column]."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Finds the next empty cell, scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solves the grid in place via backtracking; returns the grid or None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 479 | 1 |
"""simple docstring"""
from string import ascii_uppercase
letter_to_index = {char: i for i, char in enumerate(ascii_uppercase)}
index_to_letter = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeats the key cyclically until it is as long as the message."""
    x = len(key)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypts by shifting each letter back by its key letter (spaces pass through)."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (letter_to_index[letter] - letter_to_index[key_new[i]]) % 26
            i += 1
            encrypted += index_to_letter[x]
    return encrypted


def original_text(encrypted: str, key_new: str) -> str:
    """Decrypts by shifting each letter forward by its key letter."""
    or_txt = ""
    i = 0
    for letter in encrypted:
        if letter == " ":
            or_txt += " "
        else:
            x = (letter_to_index[letter] + letter_to_index[key_new[i]] + 26) % 26
            i += 1
            or_txt += index_to_letter[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 711 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Collects (hf_name, original_name) pairs for the patch embedding of stage idx."""
    embed = []
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
f"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
f"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
f"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
f"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
f"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    """Collects (hf_name, original_name) pairs for block cnt of stage idx."""
    attention_weights = []
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
f"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
f"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
f"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
f"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", f"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", f"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", f"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", f"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(f"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", f"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    """Collects the (hf_name, original_name) pair for the cls token of stage idx."""
    token = []
token.append((f"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final():
    """Collects the (hf_name, original_name) pairs for the final norm and classifier head."""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Fetches the original CvT weights and converts them to the HuggingFace format."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=3_8_4,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
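# Example invocation (file names are illustrative assumptions):
# python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#     --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth --pytorch_dump_folder_path ./cvt-w24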
| 491 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    features: List[InputFeatures]

    def __init__(self, data_dir, tokenizer, task, max_seq_length=None, overwrite_cache=False, evaluate=False):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir, "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    features: List[InputFeatures]

    def __init__(self, data_dir, tokenizer, task, max_seq_length=128, overwrite_cache=False, evaluate=False):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        # dtype choices below follow the usual TF convention (int32 inputs, int64 labels)
        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Converts InputExamples to InputFeatures with the given tokenizer."""
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
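# Usage sketch for the dataset above; the data path and checkpoint name are
# illustrative assumptions, not taken from this file.
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("roberta-base")
# dataset = HansDataset("/path/to/hans", tokenizer, task="hans", max_seq_length=128, evaluate=True)
# print(len(dataset), dataset.get_labels())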
| 410 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.", FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
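# Usage sketch (illustrative): the ONNX config exposes the expected input axes.
if __name__ == "__main__":
    _demo_config = SegformerConfig()
    _demo_onnx = SegformerOnnxConfig(_demo_config)
    print(list(_demo_onnx.inputs))  # ['pixel_values']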
| 38 | 0 |
'''simple docstring'''
def or_gate(input_1: int, input_2: int) -> int:
    """Returns 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 711 | '''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Non-default GradScaler kwargs should be forwarded to the scaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_lowerCAmelCase :int = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
_lowerCAmelCase :int = Accelerator(kwargs_handlers=[ddp_scaler])
_lowerCAmelCase :str = torch.nn.Linear(100, 200)
_lowerCAmelCase :Optional[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
_lowerCAmelCase :Dict = """"""
_lowerCAmelCase :Optional[int] = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
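# Editor's sketch (stdlib only; `TinyKwargsHandler` is a hypothetical name):
# the `to_kwargs` behaviour exercised in the first test can be reproduced by
# diffing an instance against a default-constructed one and keeping only the
# changed fields.
from dataclasses import fields

@dataclass
class TinyKwargsHandler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        default = TinyKwargsHandler()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }

assert TinyKwargsHandler(a=2).to_kwargs() == {"a": 2}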
| 179 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        '''simple docstring'''
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        return {}, {}, {}
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output )).astype('''uint8''' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict['''predicted_depth'''] = predicted_depth
        output_dict['''depth'''] = depth
        return output_dict
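# Editor's usage sketch: the pipeline above is normally reached through the
# `pipeline()` factory (checkpoint name is illustrative):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("photo.jpg")   # local path, URL, or PIL image
#   result["depth"].save("depth_map.png")   # PIL image built in postprocess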
| 92 |
from __future__ import annotations
def A ( lowercase__ : list[int] ) -> bool:
return len(set(lowercase__ ) ) == len(lowercase__ )
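# Editor's sketch: an early-exit variant of the same uniqueness check that
# stops at the first repeat instead of building the full set (assumes the
# items are hashable).
def all_unique_early_exit(items: list[int]) -> bool:
    seen: set[int] = set()
    for item in items:
        if item in seen:
            return False
        seen.add(item)
    return True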
if __name__ == "__main__":
import doctest
doctest.testmod() | 45 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 332 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('0.12.2'):
raise Exception('requires fairseq >= 0.12.2')
if version.parse(fairseq.__version__) > version.parse('2'):
raise Exception('requires fairseq < v2')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'
def a__ (__lowercase :str , __lowercase :str , __lowercase :bool ) -> Tuple:
_A : Optional[int] = Path('''data_bin''' )
_A : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(__lowercase ).parent ) , checkpoint_file=Path(__lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(__lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(__lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(__lowercase )
_A : Union[str, Any] = xmod.model.encoder.sentence_encoder
_A : Tuple = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
_A : Dict = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , __lowercase )
_A : List[str] = XmodForSequenceClassification(__lowercase ) if classification_head else XmodForMaskedLM(__lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
_A : Optional[Any] = xmod_sent_encoder.embed_tokens.weight
_A : Dict = xmod_sent_encoder.embed_positions.weight
_A : List[str] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
_A : Tuple = xmod_sent_encoder.layernorm_embedding.weight
_A : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_A : Optional[Any] = model.roberta.encoder.layer[i]
_A : Dict = xmod_sent_encoder.layers[i]
# self attention
_A : int = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
_A : Any = xmod_layer.self_attn.q_proj.weight
_A : List[str] = xmod_layer.self_attn.q_proj.bias
_A : str = xmod_layer.self_attn.k_proj.weight
_A : Optional[int] = xmod_layer.self_attn.k_proj.bias
_A : Dict = xmod_layer.self_attn.v_proj.weight
_A : Dict = xmod_layer.self_attn.v_proj.bias
# self-attention output
_A : Dict = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
_A : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
_A : List[str] = xmod_layer.self_attn.out_proj.bias
_A : Tuple = xmod_layer.self_attn_layer_norm.weight
_A : Optional[int] = xmod_layer.self_attn_layer_norm.bias
# intermediate
_A : List[Any] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
_A : Any = xmod_layer.fca.weight
_A : str = xmod_layer.fca.bias
# output
_A : Optional[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
_A : Any = xmod_layer.fca.weight
_A : Union[str, Any] = xmod_layer.fca.bias
_A : Tuple = xmod_layer.final_layer_norm.weight
_A : Tuple = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
_A : int = xmod_layer.adapter_layer_norm.weight
_A : Dict = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
_A : List[Any] = bert_output.adapter_modules[lang_code]
_A : Dict = xmod_layer.adapter_modules[lang_code]
_A : Tuple = from_adapter.fca.weight
_A : Optional[Any] = from_adapter.fca.bias
_A : str = from_adapter.fca.weight
_A : List[str] = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
_A : str = xmod_sent_encoder.layer_norm.weight
_A : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
_A : int = xmod.model.classification_heads['''mnli'''].dense.weight
_A : List[str] = xmod.model.classification_heads['''mnli'''].dense.bias
_A : Optional[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
_A : List[str] = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_A : Any = xmod.model.encoder.lm_head.dense.weight
_A : Optional[int] = xmod.model.encoder.lm_head.dense.bias
_A : str = xmod.model.encoder.lm_head.layer_norm.weight
_A : Optional[Any] = xmod.model.encoder.lm_head.layer_norm.bias
_A : Tuple = xmod.model.encoder.lm_head.weight
_A : Tuple = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
_A : Dict = xmod.encode(__lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(__lowercase )
_A : str = model(__lowercase )[0]
if classification_head:
_A : int = xmod.model.classification_heads['''mnli'''](xmod.extract_features(__lowercase ) )
else:
_A : int = xmod.model(__lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
_A : List[str] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
_A : List[Any] = torch.allclose(__lowercase , __lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(__lowercase ).mkdir(parents=__lowercase , exist_ok=__lowercase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowercase )
if __name__ == "__main__":
_UpperCamelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xmod_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_UpperCamelCase : str =parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
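# Editor's note: an illustrative invocation of the converter above (script
# and paths are placeholders):
#   python convert_xmod_checkpoint.py \
#       --xmod_checkpoint_path /path/to/model.pt \
#       --pytorch_dump_folder_path ./xmod-base-converted \
#       --classification_head   # only when converting an MNLI head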
| 332 | 1 |
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class A_ ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , generator: Optional[torch.Generator] = None , num_inference_steps: int = 5_0 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
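# Editor's usage sketch (assumes a trained unconditional UNet and a matching
# scheduler such as diffusers' DDPMScheduler):
#   pipe = A_(unet=unet, scheduler=scheduler)
#   images = pipe(batch_size=4, num_inference_steps=50).images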
| 236 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/mbart-large-en-ro''': (
            '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
        ),
        '''facebook/mbart-large-cc25''': (
            '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/mbart-large-en-ro''': 1024,
    '''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''']
class A_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , additional_special_tokens=None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors: str , src_lang: Optional[str] , tgt_lang: Optional[str] , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def prepare_seq2seq_batch( self , src_texts: List[str] , src_lang: str = "en_XX" , tgt_texts: Optional[List[str]] = None , tgt_lang: str = "ro_RO" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens( self , lang: str ) -> None:
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
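# Editor's usage sketch (mirrors the upstream MBartTokenizer API that the
# class above reproduces; checkpoint name is illustrative):
#   from transformers import MBartTokenizer
#   tok = MBartTokenizer.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("Hello, World!", return_tensors="pt")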
| 236 | 1 |
def get_bound( min_val = 10 , max_val = 1000 , option = True ):
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
    return min_val if option else max_val
def get_avg( number_a , number_b ):
    return int((number_a + number_b) / 2 )
def guess_the_number( lower , higher , to_guess ):
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('''argument value for lower and higher must be (lower < higher)''' )
    if not lower < to_guess < higher:
        raise ValueError(
            '''guess value must be within the range of lower and higher value''' )
    def answer(number ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print('''started...''' )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}" )
    print(f"details : {last_numbers!s}" )
def main( ):
    lower = int(input('''Enter lower value : ''' ).strip() )
    higher = int(input('''Enter high value : ''' ).strip() )
    guess = int(input('''Enter value to guess : ''' ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main()
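# Editor's note: the interactive loop above is plain binary search; a
# non-interactive sketch of the same idea (requires lower < target < higher):
def binary_guess(lower: int, higher: int, target: int) -> int:
    while True:
        mid = (lower + higher) // 2
        if mid < target:
            lower = mid
        elif mid > target:
            higher = mid
        else:
            return mid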
| 148 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
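# Editor's note: with `fire`, the function above becomes a small CLI; an
# illustrative call (filenames are placeholders):
#   python convert_to_fp16.py pytorch_model.bin --save_path model_fp16.bin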
| 148 | 1 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class _SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
    '''simple docstring'''
    _optional_components = ["melgan"]
def __init__(self : Tuple , UpperCAmelCase_ : SpectrogramNotesEncoder , UpperCAmelCase_ : SpectrogramContEncoder , UpperCAmelCase_ : TaFilmDecoder , UpperCAmelCase_ : DDPMScheduler , UpperCAmelCase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) ->None:
'''simple docstring'''
super().__init__()
# From MELGAN
lowerCamelCase__: List[Any] =math.log(1E-5) # Matches MelGAN training.
lowerCamelCase__: str =4.0 # Largest value for most examples
lowerCamelCase__: Dict =128
self.register_modules(
notes_encoder=UpperCAmelCase_ , continuous_encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , melgan=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=(-1.0, 1.0) , UpperCAmelCase_ : Dict=False) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: str =output_range
if clip:
lowerCamelCase__: Any =torch.clip(UpperCAmelCase_ , self.min_value , self.max_value)
# Scale to [0, 1].
lowerCamelCase__: Tuple =(features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
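        # Editor's note: the two steps above compose into one affine map,
        #   y = (x - min_in) / (max_in - min_in) * (max_out - min_out) + min_out,
        # e.g. mel features clipped to [log(1e-5), 4.0] and mapped into output_range.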
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=(-1.0, 1.0) , UpperCAmelCase_ : List[str]=False) ->Dict:
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__: List[Any] =input_range
lowerCamelCase__: List[str] =torch.clip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) if clip else outputs
# Scale to [0, 1].
lowerCamelCase__: List[str] =(outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Dict =input_tokens > 0
lowerCamelCase__ , lowerCamelCase__: int =self.notes_encoder(
encoder_input_tokens=UpperCAmelCase_ , encoder_inputs_mask=UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__: Dict =self.continuous_encoder(
encoder_inputs=UpperCAmelCase_ , encoder_inputs_mask=UpperCAmelCase_)
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple) ->int:
'''simple docstring'''
lowerCamelCase__: Tuple =noise_time
if not torch.is_tensor(UpperCAmelCase_):
lowerCamelCase__: Tuple =torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
elif torch.is_tensor(UpperCAmelCase_) and len(timesteps.shape) == 0:
lowerCamelCase__: Union[str, Any] =timesteps[None].to(input_tokens.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCamelCase__: List[Any] =timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
lowerCamelCase__: Tuple =self.decoder(
encodings_and_masks=UpperCAmelCase_ , decoder_input_tokens=UpperCAmelCase_ , decoder_noise_time=UpperCAmelCase_)
return logits
@torch.no_grad()
def __call__(self : Union[str, Any] , UpperCAmelCase_ : List[List[int]] , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : int = 100 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : str = "numpy" , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , ) ->Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCAmelCase_)}.""")
lowerCamelCase__: List[str] =np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa)
lowerCamelCase__: Union[str, Any] =np.zeros([1, 0, self.n_dims] , np.floataa)
lowerCamelCase__: Optional[int] =torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase_ , device=self.device)
for i, encoder_input_tokens in enumerate(UpperCAmelCase_):
if i == 0:
lowerCamelCase__: Optional[Any] =torch.from_numpy(pred_mel[:1].copy()).to(
device=self.device , dtype=self.decoder.dtype)
# The first chunk has no previous context.
lowerCamelCase__: int =torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase_ , device=self.device)
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowerCamelCase__: Tuple =ones
lowerCamelCase__: Optional[Any] =self.scale_features(
UpperCAmelCase_ , output_range=[-1.0, 1.0] , clip=UpperCAmelCase_)
lowerCamelCase__: Tuple =self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=UpperCAmelCase_ , continuous_mask=UpperCAmelCase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowerCamelCase__: List[Any] =randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCAmelCase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase_)
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
lowerCamelCase__: Optional[int] =self.decode(
encodings_and_masks=UpperCAmelCase_ , input_tokens=UpperCAmelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowerCamelCase__: str =self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: Optional[Any] =self.scale_to_features(UpperCAmelCase_ , input_range=[-1.0, 1.0])
lowerCamelCase__: List[Any] =mel[:1]
lowerCamelCase__: Optional[int] =mel.cpu().float().numpy()
lowerCamelCase__: int =np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase_ , UpperCAmelCase_)
logger.info("Generated segment" , UpperCAmelCase_)
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.")
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.")
if output_type == "numpy":
lowerCamelCase__: Any =self.melgan(input_features=full_pred_mel.astype(np.floataa))
else:
lowerCamelCase__: Union[str, Any] =full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCAmelCase_)
| 59 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule ( FlaxBigBirdForQuestionAnsweringModule ):
    '''simple docstring'''
    config : BigBirdConfig
    dtype : jnp.dtype = jnp.float32
    add_pooling_layer : bool = True
    def setup( self ):
        '''simple docstring'''
        super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype )
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions ( FlaxBigBirdForQuestionAnswering ):
    '''simple docstring'''
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels ):
    """simple docstring"""
    def cross_entropy(logits , labels , reduction=None ):
        vocab_size = logits.shape[-1]
        one_hot_labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype("f4" )
        log_probs = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(one_hot_labels * log_probs , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooled_labels )
    return (start_loss + end_loss + pooled_loss) / 3
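# Editor's note: the routine above averages three cross-entropy terms (start
# position, end position, and the pooled answer-category logits), the usual
# loss shape for Natural Questions style QA heads.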
@dataclass
class Args :
    '''simple docstring'''
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 1_0500
    block_size: int = 128
    seed: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3E-5
    init_lr: float = 0.0
    warmup_steps: int = 2_0000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__( self ):
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator :
    '''simple docstring'''
    pad_id : int
    max_length : int = 4096 # no dynamic padding on TPUs
    def __call__( self , batch ):
        '''simple docstring'''
        batch = self.collate_fn(batch )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch
    def collate_fn( self , features ):
        '''simple docstring'''
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"] )
        batch = {
            "input_ids": jnp.array(input_ids , dtype=jnp.int32 ),
            "attention_mask": jnp.array(attention_mask , dtype=jnp.int32 ),
            "start_labels": jnp.array(features["start_token"] , dtype=jnp.int32 ),
            "end_labels": jnp.array(features["end_token"] , dtype=jnp.int32 ),
            "pooled_labels": jnp.array(features["category"] , dtype=jnp.int32 ),
        }
        return batch
    def fetch_inputs( self , input_ids: list ):
        '''simple docstring'''
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )
    def _fetch_inputs( self , input_ids: list ):
        '''simple docstring'''
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset( dataset , batch_size , seed=None ):
    """simple docstring"""
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap , axis_name="batch" )
def train_step( state , drp_rng , **model_inputs ):
    """simple docstring"""
    def loss_fn(params ):
        start_labels = model_inputs.pop("start_labels" )
        end_labels = model_inputs.pop("end_labels" )
        pooled_labels = model_inputs.pop("pooled_labels" )
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )
    drp_rng, new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss, grads = grad_fn(state.params )
    metrics = jax.lax.pmean({"loss": loss} , axis_name="batch" )
    grads = jax.lax.pmean(grads , "batch" )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def val_step( state , **model_inputs ):
    """simple docstring"""
    start_labels = model_inputs.pop("start_labels" )
    end_labels = model_inputs.pop("end_labels" )
    pooled_labels = model_inputs.pop("pooled_labels" )
    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({"loss": loss} , axis_name="batch" )
    return metrics
class TrainState ( train_state.TrainState ):
    '''simple docstring'''
    loss_fn : Callable = struct.field(pytree_node=False )
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=None) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Dict =model.params
lowerCamelCase__: Tuple =TrainState.create(
apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , loss_fn=UpperCAmelCase_ , )
if ckpt_dir is not None:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Any =restore_checkpoint(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Tuple ={
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
lowerCamelCase__ , lowerCamelCase__: List[Any] =build_tx(**UpperCAmelCase_)
lowerCamelCase__: str =train_state.TrainState(
step=UpperCAmelCase_ , apply_fn=model.__call__ , params=UpperCAmelCase_ , tx=UpperCAmelCase_ , opt_state=UpperCAmelCase_ , )
lowerCamelCase__: Tuple =args
lowerCamelCase__: Tuple =data_collator
lowerCamelCase__: str =lr
lowerCamelCase__: Dict =params
lowerCamelCase__: List[str] =jax_utils.replicate(UpperCAmelCase_)
return state
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.args
lowerCamelCase__: Any =len(UpperCAmelCase_) // args.batch_size
lowerCamelCase__: List[str] =jax.random.PRNGKey(0)
lowerCamelCase__: Optional[Any] =jax.random.split(UpperCAmelCase_ , jax.device_count())
for epoch in range(args.max_epochs):
lowerCamelCase__: Union[str, Any] =jnp.array(0 , dtype=jnp.floataa)
lowerCamelCase__: str =get_batched_dataset(UpperCAmelCase_ , args.batch_size , seed=UpperCAmelCase_)
lowerCamelCase__: Dict =0
for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc=F"""Running EPOCH-{epoch}"""):
lowerCamelCase__: List[str] =self.data_collator(UpperCAmelCase_)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =self.train_step_fn(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_)
running_loss += jax_utils.unreplicate(metrics["loss"])
i += 1
if i % args.logging_steps == 0:
lowerCamelCase__: Optional[int] =jax_utils.unreplicate(state.step)
lowerCamelCase__: List[Any] =running_loss.item() / i
lowerCamelCase__: Tuple =self.scheduler_fn(state_step - 1)
lowerCamelCase__: Union[str, Any] =self.evaluate(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Dict ={
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(UpperCAmelCase_))
self.logger.log(UpperCAmelCase_ , commit=UpperCAmelCase_)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str) ->Any:
'''simple docstring'''
lowerCamelCase__: List[Any] =get_batched_dataset(UpperCAmelCase_ , self.args.batch_size)
lowerCamelCase__: List[str] =len(UpperCAmelCase_) // self.args.batch_size
lowerCamelCase__: str =jnp.array(0 , dtype=jnp.floataa)
lowerCamelCase__: Optional[Any] =0
for batch in tqdm(UpperCAmelCase_ , total=UpperCAmelCase_ , desc="Evaluating ... "):
lowerCamelCase__: int =self.data_collator(UpperCAmelCase_)
lowerCamelCase__: str =self.val_step_fn(UpperCAmelCase_ , **UpperCAmelCase_)
running_loss += jax_utils.unreplicate(metrics["loss"])
i += 1
return running_loss / i
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]) ->int:
'''simple docstring'''
lowerCamelCase__: Any =jax_utils.unreplicate(UpperCAmelCase_)
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=" ... ")
self.model_save_fn(UpperCAmelCase_ , params=state.params)
with open(os.path.join(UpperCAmelCase_ , "opt_state.msgpack") , "wb") as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(UpperCAmelCase_ , "args.joblib"))
joblib.dump(self.data_collator , os.path.join(UpperCAmelCase_ , "data_collator.joblib"))
with open(os.path.join(UpperCAmelCase_ , "training_state.json") , "w") as f:
json.dump({"step": state.step.item()} , UpperCAmelCase_)
print("DONE")
def restore_checkpoint( save_dir , state ):
    """simple docstring"""
    print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=" ... " )
    with open(os.path.join(save_dir , "flax_model.msgpack" ) , "rb" ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , "opt_state.msgpack" ) , "rb" ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , "args.joblib" ) )
    data_collator = joblib.load(os.path.join(save_dir , "data_collator.joblib" ) )
    with open(os.path.join(save_dir , "training_state.json" ) , "r" ) as f:
        training_state = json.load(f )
    step = training_state["step"]
    print("DONE" )
    return params, opt_state, step, args, data_collator
def scheduler_fn( lr , init_lr , warmup_steps , num_train_steps ):
    """simple docstring"""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1e-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
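# Editor's note: `scheduler_fn` ramps linearly from init_lr to lr over
# `warmup_steps`, then decays linearly toward 1e-7 for the remaining steps.
# A quick sample of the resulting schedule (illustrative numbers):
#   lr_fn = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=2, num_train_steps=4)
#   [float(lr_fn(i)) for i in range(4)]   # ~[0.0, 1.5e-05, 3e-05, 1.5e-05]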
def build_tx( lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    """simple docstring"""
    def weight_decay_mask(params ):
        flat_params = traverse_util.flatten_dict(params )
        flat_mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in flat_params}
        return traverse_util.unflatten_dict(flat_mask )
    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
| 59 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Any = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class snake_case ( PretrainedConfig ):
    """simple docstring"""
    model_type = """unispeech"""
def __init__( self, _lowercase=32, _lowercase=768, _lowercase=12, _lowercase=12, _lowercase=3072, _lowercase="gelu", _lowercase=0.1, _lowercase=0.1, _lowercase=0.1, _lowercase=0.0, _lowercase=0.0, _lowercase=0.1, _lowercase=0.1, _lowercase=0.02, _lowercase=1E-5, _lowercase="group", _lowercase="gelu", _lowercase=(512, 512, 512, 512, 512, 512, 512), _lowercase=(5, 2, 2, 2, 2, 2, 2), _lowercase=(10, 3, 3, 3, 3, 2, 2), _lowercase=False, _lowercase=128, _lowercase=16, _lowercase=False, _lowercase=True, _lowercase=0.05, _lowercase=10, _lowercase=2, _lowercase=0.0, _lowercase=10, _lowercase=0, _lowercase=320, _lowercase=2, _lowercase=0.1, _lowercase=100, _lowercase=256, _lowercase=256, _lowercase=0.1, _lowercase="mean", _lowercase=False, _lowercase=False, _lowercase=256, _lowercase=80, _lowercase=0, _lowercase=1, _lowercase=2, _lowercase=0.5, **_lowercase, ) -> List[str]:
super().__init__(**_lowercase, pad_token_id=_lowercase, bos_token_id=_lowercase, eos_token_id=_lowercase )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = feat_extract_norm
SCREAMING_SNAKE_CASE_ = feat_extract_activation
SCREAMING_SNAKE_CASE_ = list(_lowercase )
SCREAMING_SNAKE_CASE_ = list(_lowercase )
SCREAMING_SNAKE_CASE_ = list(_lowercase )
SCREAMING_SNAKE_CASE_ = conv_bias
SCREAMING_SNAKE_CASE_ = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE_ = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE_ = len(self.conv_dim )
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = feat_proj_dropout
SCREAMING_SNAKE_CASE_ = final_dropout
SCREAMING_SNAKE_CASE_ = layerdrop
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_ctc_classes
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = do_stable_layer_norm
SCREAMING_SNAKE_CASE_ = use_weighted_layer_sum
SCREAMING_SNAKE_CASE_ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE_ = apply_spec_augment
SCREAMING_SNAKE_CASE_ = mask_time_prob
SCREAMING_SNAKE_CASE_ = mask_time_length
SCREAMING_SNAKE_CASE_ = mask_time_min_masks
SCREAMING_SNAKE_CASE_ = mask_feature_prob
SCREAMING_SNAKE_CASE_ = mask_feature_length
SCREAMING_SNAKE_CASE_ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE_ = num_codevectors_per_group
SCREAMING_SNAKE_CASE_ = num_codevector_groups
SCREAMING_SNAKE_CASE_ = contrastive_logits_temperature
SCREAMING_SNAKE_CASE_ = feat_quantizer_dropout
SCREAMING_SNAKE_CASE_ = num_negatives
SCREAMING_SNAKE_CASE_ = codevector_dim
SCREAMING_SNAKE_CASE_ = proj_codevector_dim
SCREAMING_SNAKE_CASE_ = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE_ = ctc_loss_reduction
SCREAMING_SNAKE_CASE_ = ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE_ = replace_prob
@property
def a__ ( self ) -> List[Any]:
return functools.reduce(operator.mul, self.conv_stride, 1 )
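# Editor's usage sketch (class names follow the transformers library that this
# config mirrors):
#   from transformers import UniSpeechConfig, UniSpeechModel
#   config = UniSpeechConfig(hidden_size=768, num_hidden_layers=12)
#   model = UniSpeechModel(config)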
| 238 |
'''simple docstring'''
class Graph :
    """simple docstring"""
    def __init__( self ) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ) -> None:
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ) -> None:
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ) -> None:
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ) -> str:
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip('\n' )
    def get_edges( self ) -> list:
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind :
        """simple docstring"""
        def __init__( self ) -> None:
            self.parent = {}
            self.rank = {}
        def __len__( self ) -> int:
            return len(self.parent )
        def make_set( self , item ):
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ):
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , item_a , item_b ):
            root_a = self.find(item_a )
            root_b = self.find(item_b )
            if root_a == root_b:
                return root_a
            if self.rank[root_a] > self.rank[root_b]:
                self.parent[root_b] = root_a
                return root_a
            if self.rank[root_a] < self.rank[root_b]:
                self.parent[root_a] = root_b
                return root_b
            if self.rank[root_a] == self.rank[root_b]:
                self.rank[root_a] += 1
                self.parent[root_b] = root_a
                return root_a
            return None
    @staticmethod
    def boruvka_mst( graph ):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set_a = union_find.find(head )
                set_b = union_find.find(tail )
                if set_a != set_b:
                    if cheap_edge[set_a] == -1 or cheap_edge[set_a][2] > weight:
                        cheap_edge[set_a] = [head, tail, weight]
                    if cheap_edge[set_b] == -1 or cheap_edge[set_b][2] > weight:
                        cheap_edge[set_b] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
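# Editor's usage sketch for the Boruvka implementation above:
if __name__ == "__main__":
    g = Graph.build(
        vertices=[1, 2, 3, 4],
        edges=[(1, 2, 1), (2, 3, 2), (3, 4, 3), (4, 1, 4)],
    )
    g.distinct_weight()
    print(Graph.boruvka_mst(g))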
| 238 | 1 |
"""simple docstring"""
def fizz_buzz( number , iterations ) -> str:
    if not isinstance(iterations , int ):
        raise ValueError("""iterations must be defined as integers""" )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            """starting number must be an integer and be more than 0""" )
    if not iterations >= 1:
        raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
    out = """"""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
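# Editor's note: a quick illustration of the function above:
#   fizz_buzz(1, 15)
#   -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "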
| 264 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'artists_file': 'artists.json',
    'lyrics_file': 'lyrics.json',
    'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'artists_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
    },
    'genres_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
    },
    'lyrics_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
    },
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
    'jukebox': 5_12,
}
class UpperCamelCase (PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self :Tuple , __magic_name__ :Dict , __magic_name__ :int , __magic_name__ :str , __magic_name__ :List[Any]=["v3", "v2", "v2"] , __magic_name__ :List[Any]=512 , __magic_name__ :Optional[Any]=5 , __magic_name__ :Tuple="<|endoftext|>" , **__magic_name__ :Tuple , ) ->Union[str, Any]:
lowercase : Any = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
super().__init__(
unk_token=__magic_name__ , n_genres=__magic_name__ , version=__magic_name__ , max_n_lyric_tokens=__magic_name__ , **__magic_name__ , )
lowercase : Optional[int] = version
lowercase : Optional[int] = max_n_lyric_tokens
lowercase : Optional[int] = n_genres
with open(__magic_name__ , encoding="""utf-8""" ) as vocab_handle:
lowercase : Optional[int] = json.load(__magic_name__ )
with open(__magic_name__ , encoding="""utf-8""" ) as vocab_handle:
lowercase : Any = json.load(__magic_name__ )
with open(__magic_name__ , encoding="""utf-8""" ) as vocab_handle:
lowercase : List[str] = json.load(__magic_name__ )
lowercase : str = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
lowercase : Dict = oov.replace(r"""\-'""" , r"""\-+'""" )
lowercase : Union[str, Any] = regex.compile(__magic_name__ )
lowercase : Tuple = {v: k for k, v in self.artists_encoder.items()}
lowercase : Tuple = {v: k for k, v in self.genres_encoder.items()}
lowercase : Dict = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # Merge the three sub-vocabularies into a single mapping.
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        # Only the lyrics are tokenized, character by character.
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(
        self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False
    ) -> Tuple[str, str, str, Dict[str, Any]]:
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []

        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        """Strips accents from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalizes the input text: lower-cases it, and anything outside [a-z0-9.] becomes '_'."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)
    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Saves the tokenizer's vocabulary dictionaries to the provided directory."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
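# Usage sketch (illustrative; the checkpoint name mirrors the vocab map above):
#     tokenizer = JukeboxTokenizer.from_pretrained("ArthurZ/jukebox")
#     encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
#     encoding["input_ids"]  # one tensor per prior: three metadata slots, then artist, genre and lyric ids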
| 264 | 1 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
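# Illustrative check: with model outputs [[0.1, 0.9], [0.8, 0.2]] and labels [1, 0],
# np.argmax along axis 1 yields [1, 0], so accuracy(...) returns 2 correct predictions.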
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, 2, input_len), one row per candidate continuation:
    input_ids[i, alt, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
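# Note: -100 is the default ignore_index of PyTorch's cross-entropy loss, so the
# positions of lm_labels left at fill_value contribute nothing to the LM loss.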
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare input tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
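# Example invocation (script and dataset paths illustrative; adapt to where the
# ROCStories CSVs live):
#     python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#         --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#         --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#         --output_dir ../log --train_batch_size 16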
| 564 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
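# Migration sketch (illustrative checkpoint): new code should instantiate the
# processor class directly rather than this deprecated alias:
#     from transformers import MobileViTImageProcessor
#     image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")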
| 564 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
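# Usage sketch (illustrative): the defaults mirror roberta-base, so a custom model
# only needs the fields that differ:
#     config = RobertaPreLayerNormConfig(num_hidden_layers=6)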
| 354 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating-point scores are so close that we hit floating-point error,
        # so the order is not guaranteed across Python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # Five batched inputs, three tied candidate labels each.
            [[{"score": 0.333, "label": ANY(str)}] * 3] * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [[{"score": 0.333, "label": ANY(str)}] * 3] * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
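# Minimal usage sketch for the pipeline under test (checkpoint as in the slow tests):
#     classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier(image, candidate_labels=["cat", "plane", "remote"])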
| 354 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
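# Minimal CPU usage sketch for the pipeline under test (`low_res_image` is a
# placeholder for any small PIL image):
#     pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#         "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
#     )
#     upscaled = pipe(prompt="a photo of a castle", image=low_res_image).images[0]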
| 714 |
def ugly_numbers(n: int) -> int:
    """Returns the nth ugly number; an ugly number's only prime factors are 2, 3 and 5."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
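# The ugly-number sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... so for
# example ugly_numbers(10) == 12.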
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'{ugly_numbers(200) = }')
| 149 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
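# Usage sketch (illustrative): the pronunciation and shape embeddings are both
# optional and can be toggled independently:
#     config = RoCBertConfig(enable_pronunciation=True, enable_shape=False)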
| 132 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
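# Example invocation (script and file names illustrative):
#     python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path bigbird_model.ckpt \
#         --big_bird_config_file big_bird_config.json \
#         --pytorch_dump_path ./bigbird-pytorch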
| 132 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
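# Usage sketch (illustrative; the framework-specific subclasses feed these
# arguments into the corresponding benchmark runner):
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#     args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#     results = PyTorchBenchmark(args).run()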
| 431 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
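# Run note (repository-relative path is an assumption):
#     python -m pytest tests/models/ctrl/test_modeling_ctrl.py -q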
| 431 | 1 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate' )
    parser.add_argument('--data_dir' , type=str , help='like cnn_dm/test.source' )
    parser.add_argument(
        '--model_name' , type=str , help='like facebook/bart-large-cnn,t5-base, etc.' , default='sshleifer/distilbart-xsum-12-3' , )
    parser.add_argument('--save_dir' , type=str , help='where to save' , default='tmp_gen' )
    parser.add_argument('--max_source_length' , type=int , default=None )
    parser.add_argument(
        '--type_path' , type=str , default='test' , help='which subset to evaluate typically train/val/test' )
    parser.add_argument('--task' , type=str , default='summarization' , help='used for task_specific_params + metrics' )
    parser.add_argument('--bs' , type=int , default=8 , required=False , help='batch size' )
    parser.add_argument(
        '--local_rank' , type=int , default=-1 , required=False , help='should be passed by distributed.launch' )
    parser.add_argument(
        '--n_obs' , type=int , default=None , required=False , help='How many observations. Defaults to all.' )
    parser.add_argument(
        '--num_return_sequences' , type=int , default=1 , required=False , help='How many sequences to return' )
    parser.add_argument(
        '--sync_timeout' , type=int , default=600 , required=False , help='How long should master process wait for other processes to finish.' , )
    parser.add_argument('--src_lang' , type=str , default=None , required=False )
    parser.add_argument('--tgt_lang' , type=str , default=None , required=False )
    parser.add_argument(
        '--prefix' , type=str , required=False , default=None , help='will be added to the beginning of src examples' )
    parser.add_argument('--fp16' , action='store_true' )
    parser.add_argument('--debug' , action='store_true' )
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if generate_kwargs and args.local_rank <= 0:
        print(F'''parsed the following generate kwargs: {generate_kwargs}''' )
    json_save_dir = Path(args.save_dir + '_tmp' )
    Path(json_save_dir ).mkdir(exist_ok=True )  # this handles locking.
    intermediate_files = list(json_save_dir.glob('rank_*.json' ) )
    if intermediate_files:
        raise ValueError(F'''Found files at {json_save_dir} please move or remove them.''' )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['src_lang'] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['tgt_lang'] = args.tgt_lang

    Path(args.save_dir ).mkdir(exist_ok=True )
    results, num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('pseudolabel_results.json' )
            print(F'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + '.target' )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = 'translation' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = 'bleu' if calc_bleu else 'rouge'
        metrics = score_fn(preds , labels )
        metrics['n_obs'] = len(preds )
        runtime = time.time() - start_time
        metrics['seconds_per_sample'] = round(runtime / metrics['n_obs'] , 4 )
        metrics['n_gpus'] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F'''{args.type_path}_{metric_name}.json''' )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(F'''{args.type_path}_generations.txt''' ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(F'''{args.type_path}.target''' ) )
        else:
            shutil.rmtree(json_save_dir )
def combine_partial_results(partial_results ) -> list:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x : x["id"] )
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node(num_replicas , save_dir , timeout ) -> list:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('waiting for all nodes to finish' )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json' ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('Rank 0 gave up on waiting for other processes' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
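For reference, a plausible multi-GPU launch of this script; the data directory and GPU count below are illustrative placeholders (the model name is just the script's own default), not taken from the original:

# python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#     --model_name sshleifer/distilbart-xsum-12-3 \
#     --data_dir xsum \
#     --save_dir xsum_generations \
#     --type_path test \
#     --fp16 --bs 16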
| 622 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '[PAD]'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0] , '[PAD]')
        self.assertEqual(vocab_keys[1] , '[CLS]')
        self.assertEqual(vocab_keys[-1] , 'j')
        self.assertEqual(len(vocab_keys) , 10_12)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_12)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased')
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [3_53_89, 66_72, 49, 2]

        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase__ = {'input_ids': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__ , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 622 | 1 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: finds the longest palindromic substring in linear time.

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update l and r to the bounds of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
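A quick sanity check of the routine above (any input string works; the expected outputs follow from the doctests):

print(palindromic_string("abbbaba"))           # abbba
print(palindromic_string("forgeeksskeegfor"))  # geeksskeeg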
| 479 | from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 1_00,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        """Explicit Langevin-like "churn" step: add noise to the sample by a factor gamma."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Correct the predicted sample based on the model_output of the network (2nd-order step)."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
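A minimal sampling-loop sketch for the scheduler above. The toy_model stand-in, the latent shape, and the step count are assumptions for illustration; a real pipeline would use a trained denoiser and would also call step_correct when sigma_prev is non-zero:

def toy_model(sample, sigma):
    # stand-in for a trained denoiser: predicts a zero residual
    return jnp.zeros_like(sample)


scheduler = FlaxKarrasVeScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=10)

key = random.PRNGKey(0)
sample = random.normal(key, (1, 8, 8, 3)) * scheduler.config.sigma_max
for t in state.timesteps:
    sigma = state.schedule[t]
    sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
    sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
    model_output = toy_model(sample_hat, sigma_hat)
    output = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat)
    sample = output.prev_sample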
| 479 | 1 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid, applied elementwise."""
    return 1 / (1 + np.exp(-vector ))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
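A small numeric check of the two functions above:

print(sigmoid(np.array([0.0])))                         # [0.5]
print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])))  # approx [-0.2689  0.      0.7311]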
| 638 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 200_0000 ) -> int:
    """
    Find the area of the grid whose rectangle count is closest to `target`.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f"{solution() = }")
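Where the quadratic estimate comes from: an a-by-b grid contains T(a) * T(b) rectangles, with T(n) = n(n + 1)/2 the n-th triangle number. Fixing T(a) = triangle_a and solving triangle_a * b(b + 1)/2 = target for b gives b^2 + b - 2 * target / triangle_a = 0, whose positive root is b = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2, exactly the expression assigned to b_estimate above.

# sanity check for one slice: triangle_a = T(3) = 6 and target = 2_000_000
# gives b_estimate = (-1 + sqrt(1 + 8 * 2_000_000 / 6)) / 2, roughly 816.0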
| 638 | 1 |
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale PIL image object
    """
    height, width = image.size
    mean = 0
    pixels = image.load()

    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
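An equivalent vectorized formulation with NumPy, included as a sketch (not part of the original module; assumes the image is already a 2-D grayscale array):

import numpy as np


def mean_threshold_np(gray: np.ndarray) -> np.ndarray:
    # binarize against the global mean in one pass
    return np.where(gray > gray.mean(), 255, 0).astype(np.uint8)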
| 701 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
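A quick worked run of the decoder above; the ciphertext is the classic example encrypted with a shift of 7, and the decoded values follow from the logic just shown:

shift, chi_squared_value, plaintext = decrypt_caesar_with_chi_squared(
    "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
)
print(shift)      # 7
print(plaintext)  # why is the caesar cipher so popular? it is too easy to crack!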
| 482 | 0 |
"""simple docstring"""
class Node:
    def __init__(self , name , val ):
        self.name = name
        self.val = val

    def __str__(self ):
        return F"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self , other ):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of Node objects with an index map, so decrease_key runs in O(log n).
    """

    def __init__(self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )

    def __getitem__(self , key ):
        return self.get_value(key )

    def get_parent_idx(self , idx ):
        return (idx - 1) // 2

    def get_left_child_idx(self , idx ):
        return idx * 2 + 1

    def get_right_child_idx(self , idx ):
        return idx * 2 + 2

    def get_value(self , key ):
        return self.heap_dict[key]

    def build_heap(self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array

    # this is the min-heapify method
    def sift_down(self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )

            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]] = idx
                self.idx_of_element[array[smallest]] = smallest
                idx = smallest
            else:
                break

    def sift_up(self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]] = p
            self.idx_of_element[self.heap[idx]] = idx
            idx = p
            p = self.get_parent_idx(idx )

    def peek(self ):
        return self.heap[0]

    def remove(self ):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]] = 0
        self.idx_of_element[self.heap[-1]] = len(self.heap ) - 1

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x

    def insert(self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )

    def is_empty(self ):
        return len(self.heap ) == 0

    def decrease_key(self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )


r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -1_7)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
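A short demonstration of the remaining operations; the printed values follow from the heap state after the decrease-key above:

print(my_min_heap.peek())    # Node(B, -17), the current minimum
print(my_min_heap.remove())  # pops and returns Node(B, -17) in O(log n)
my_min_heap.insert(Node("Q", 2))
print(my_min_heap.peek())    # Node(R, -1) is now the minimum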
| 76 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor ):
    def __init__(self , *args , **kwargs ) -> None:
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
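For downstream code, migration is a one-line swap (class names as in current Transformers releases):

# before (deprecated):
#     from transformers import LayoutLMv2FeatureExtractor
#     feature_extractor = LayoutLMv2FeatureExtractor()
# after (preferred):
from transformers import LayoutLMv2ImageProcessor

image_processor = LayoutLMv2ImageProcessor()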
| 76 | 1 |
import unittest
from knapsack import knapsack as k
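For context, a minimal recursive 0/1 knapsack matching the signature the tests below call, k.knapsack(capacity, weights, values, counter); this is a hypothetical sketch, and the repository's actual knapsack module may differ:

def knapsack_sketch(capacity, weights, values, counter):
    # base case: no items left or no remaining capacity
    if counter == 0 or capacity == 0:
        return 0
    # item doesn't fit: skip it
    if weights[counter - 1] > capacity:
        return knapsack_sketch(capacity, weights, values, counter - 1)
    # otherwise take the better of including or excluding the item
    return max(
        values[counter - 1] + knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack_sketch(capacity, weights, values, counter - 1),
    )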
class TestKnapsack(unittest.TestCase ):
    '''Test cases for the 0/1 knapsack.'''

    def test_base_case(self ) -> None:
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )

        val = [6_0]
        w = [1_0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )

    def test_easy_case(self ) -> None:
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )

    def test_knapsack(self ) -> None:
        cap = 5_0
        val = [6_0, 1_0_0, 1_2_0]
        w = [1_0, 2_0, 3_0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 2_2_0 )
if __name__ == "__main__":
unittest.main() | 381 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
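To make the remapping concrete, here is how one checkpoint key would be rewritten by the fixup loop in _load_model below (the key itself is illustrative):

# "_orig_mod.transformer.h.0.attn.c_attn.weight"
#   strip "_orig_mod."         -> "transformer.h.0.attn.c_attn.weight"
#   apply new_layer_name_dict  -> "layers.0.attn.att_proj.weight"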
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path(model_type, use_small=False ):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"] )
def _download(from_hf_path, file_name ):
    os.makedirs(CACHE_DIR, exist_ok=True )
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR )
def _load_model(ckpt_path, device, use_small=False, model_type="text" ):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info["repo_id"], model_info["file_name"] )
    checkpoint = torch.load(ckpt_path, map_location=device )
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head" )
    model_args["hidden_size"] = model_args.pop("n_embd" )
    model_args["num_layers"] = model_args.pop("n_layer" )

    model_config = ConfigClass(**checkpoint["model_args"] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )

    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias" )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(extra_keys ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(missing_keys ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(state_dict, strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params/1e6, 1 )}M params, {round(val_loss, 3 )} loss""" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict

    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text" ):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small )
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small )

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small )

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int )
        output_old_model = bark_model(vec )[0]

        output_new_model_total = model(vec )

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int )

        output_new_model_total = model(prediction_codebook_channel, vec )
        output_old_model = bark_model(prediction_codebook_channel, vec )

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal" )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path, ):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text )

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json" ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json" ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json" ) )
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz" )

    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz" )

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config )

    bark = BarkModel(bark_config )

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small) | 381 | 1 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo" ) -> dict:
    with open(hf_hub_download(repo_path , class_info_file , repo_type='''dataset''' ) , '''r''' ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['''name''']
        class_names.append(info['''name'''] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata['''thing_ids'''] = thing_ids
    metadata['''class_names'''] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase ):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10, ) -> Any:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'''shortest_edge''': 32, '''longest_edge''': 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path )
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self ) -> dict:
        return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['''shortest_edge'''] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size['''shortest_edge'''] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values, key=lambda item : item[0] )[0]
            expected_width = max(expected_values, key=lambda item : item[1] )[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self ):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ), masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ), )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):

    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp(self ):
        self.image_processing_tester = OneFormerImageProcessorTester(self )
    @property
    def image_processor_dict(self ):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing, '''image_std''' ) )
        self.assertTrue(hasattr(image_processing, '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing, '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing, '''size''' ) )
        self.assertTrue(hasattr(image_processing, '''ignore_index''' ) )
        self.assertTrue(hasattr(image_processing, '''class_info_file''' ) )
        self.assertTrue(hasattr(image_processing, '''num_text''' ) )
        self.assertTrue(hasattr(image_processing, '''repo_path''' ) )
        self.assertTrue(hasattr(image_processing, '''metadata''' ) )
        self.assertTrue(hasattr(image_processing, '''do_reduce_labels''' ) )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
# Initialize image_processor
UpperCamelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester, equal_resolution=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__, Image.Image )
# Test not batched input
UpperCamelCase__ : Any = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values
UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(__magic_name__, batched=__magic_name__ )
UpperCamelCase__ : str = image_processor(
__magic_name__, ['''semantic'''] * len(__magic_name__ ), return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
# Initialize image_processor
UpperCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ : List[str] = prepare_image_inputs(self.image_processing_tester, equal_resolution=__magic_name__, numpify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__, np.ndarray )
# Test not batched input
UpperCamelCase__ : List[Any] = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values
UpperCamelCase__ ,UpperCamelCase__ : int = self.image_processing_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = self.image_processing_tester.get_expected_values(__magic_name__, batched=__magic_name__ )
UpperCamelCase__ : Optional[Any] = image_processor(
__magic_name__, ['''semantic'''] * len(__magic_name__ ), return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processor
UpperCamelCase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester, equal_resolution=__magic_name__, torchify=__magic_name__ )
for image in image_inputs:
self.assertIsInstance(__magic_name__, torch.Tensor )
# Test not batched input
UpperCamelCase__ : Optional[int] = image_processor(image_inputs[0], ['''semantic'''], return_tensors='''pt''' ).pixel_values
UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.image_processing_tester.get_expected_values(__magic_name__ )
self.assertEqual(
encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
# Test batched
UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(__magic_name__, batched=__magic_name__ )
UpperCamelCase__ : Dict = image_processor(
__magic_name__, ['''semantic'''] * len(__magic_name__ ), return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase__ ( self, __magic_name__=False, __magic_name__=False, __magic_name__="np" ) -> str:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCamelCase__ : Union[str, Any] = self.image_processing_tester.num_labels
UpperCamelCase__ : Any = None
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester, equal_resolution=__magic_name__ )
if with_segmentation_maps:
UpperCamelCase__ : Optional[int] = num_labels
if is_instance_map:
UpperCamelCase__ : Optional[int] = list(range(__magic_name__ ) ) * 2
UpperCamelCase__ : Union[str, Any] = dict(enumerate(__magic_name__ ) )
UpperCamelCase__ : Any = [
np.random.randint(0, high * 2, (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCamelCase__ : str = [Image.fromarray(__magic_name__ ) for annotation in annotations]
UpperCamelCase__ : Tuple = image_processor(
__magic_name__, ['''semantic'''] * len(__magic_name__ ), __magic_name__, return_tensors='''pt''', instance_id_to_semantic_id=__magic_name__, pad_and_return_pixel_mask=__magic_name__, )
return inputs
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
def common(__magic_name__=False, __magic_name__=None ):
UpperCamelCase__ : str = self.comm_get_image_processor_inputs(
with_segmentation_maps=__magic_name__, is_instance_map=__magic_name__, segmentation_type=__magic_name__ )
UpperCamelCase__ : Any = inputs['''mask_labels''']
UpperCamelCase__ : Union[str, Any] = inputs['''class_labels''']
UpperCamelCase__ : Optional[int] = inputs['''pixel_values''']
UpperCamelCase__ : List[Any] = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(__magic_name__, __magic_name__, __magic_name__ ):
self.assertEqual(mask_label.shape[0], class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:] )
self.assertEqual(len(__magic_name__ ), self.image_processing_tester.num_text )
common()
common(is_instance_map=__magic_name__ )
common(is_instance_map=__magic_name__, segmentation_type='''pil''' )
common(is_instance_map=__magic_name__, segmentation_type='''pil''' )
    def test_binary_mask_to_rle(self ):
        fake_binary_mask = np.zeros((20, 50) )
        fake_binary_mask[0, 20:] = 1  # first row
        fake_binary_mask[1, :15] = 1  # second row
        fake_binary_mask[5, :10] = 1  # fifth row

        rle = binary_mask_to_rle(fake_binary_mask )
        self.assertEqual(len(rle ), 4 )
        self.assertEqual(rle[0], 21 )
        self.assertEqual(rle[1], 45 )
    def test_post_process_semantic_segmentation(self ):
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()

        segmentation = feature_extractor.post_process_semantic_segmentation(outputs )

        self.assertEqual(len(segmentation ), self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ), )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes )

        self.assertEqual(segmentation[0].shape, target_sizes[0] )
    def test_post_process_instance_segmentation(self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0 )

        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ), list )
            self.assertEqual(
                el['''segmentation'''].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation(self ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='''ade20k_panoptic.json''', num_text=self.image_processing_tester.num_text, repo_path='''shi-labs/oneformer_demo''', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0 )

        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ), list )
            self.assertEqual(
                el['''segmentation'''].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
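For reference, a compact run-length encoder consistent with the expectations in test_binary_mask_to_rle above; a sketch only, since the canonical implementation is the binary_mask_to_rle imported at the top of this file:

import numpy as np


def binary_mask_to_rle_sketch(mask):
    # pad the flattened mask with zeros and record (1-indexed start, run length) pairs for runs of ones
    pixels = np.concatenate([[0], mask.flatten(), [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return list(runs)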
| 253 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase ):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ) -> Union[str, Any]:
        """simple docstring"""
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):

    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing, '''image_std''' ) )
        self.assertTrue(hasattr(image_processing, '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing, '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing, '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing, '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84} )
    def test_batch_feature( self ):
"""simple docstring"""
pass
    def test_call_pil( self ):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_numpy( self ):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_pytorch( self ):
        """simple docstring"""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
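# A minimal usage sketch for the image processor exercised above; it relies only on the
# __call__ behaviour the tests demonstrate (resize -> center-crop -> normalize) and
# uses a random image as a stand-in, so it is illustrative rather than authoritative.
if is_torch_available() and is_vision_available():
    def _example_levit_preprocessing():
        image = Image.fromarray(np.random.randint(0, 255, (40, 60, 3), dtype=np.uint8))
        processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
        pixel_values = processor(image, return_tensors="pt").pixel_values
        assert pixel_values.shape == (1, 3, 18, 18)  # (batch, channels, crop height, crop width)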
| 253 | 1 |
import qiskit
def single_qubit_measure( qubits: int, classical_bits: int ):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
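# A small sanity check for the circuit above: with an X gate applied to both qubits
# and no other operations, every shot should collapse to the bit string '11', so the
# histogram is expected to contain that single key (assumes qiskit Aer is installed).
def _check_all_shots_are_11():
    counts = single_qubit_measure(2, 2)
    assert set(counts) == {"11"}
    assert sum(counts.values()) == 1_000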
| 102 |
def longest_common_subsequence( x: str, y: str ):
    """simple docstring"""
    assert x is not None
    assert y is not None
    m = len(x )
    n = len(y )
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741
    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
    seq = ''
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = '''AGGTAB'''
    b = '''GXTXAYB'''
    expected_ln = 4
    expected_subseq = '''GTAB'''
    ln, subseq = longest_common_subsequence(a, b)
    print('''len =''', ln, ''', sub-sequence =''', subseq)
    import doctest

    doctest.testmod()
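# A few extra worked examples for the DP above: l[i][j] holds the LCS length of x[:i]
# and y[:j], so the bottom-right cell is the answer and the backtracking loop then
# reconstructs one witness subsequence.
def _lcs_examples():
    assert longest_common_subsequence("AGGTAB", "GXTXAYB") == (4, "GTAB")
    assert longest_common_subsequence("abc", "def") == (0, "")
    assert longest_common_subsequence("abc", "abc") == (3, "abc")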
| 102 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor ):
    '''simple docstring'''
    model_input_names = ['input_features', 'attention_mask']
    def __init__( self ,feature_size=80 ,sampling_rate=16_000 ,padding_value=0.0 ,hop_length=10 ,win_length=25 ,win_function="hamming_window" ,frame_signal_scale=32_768.0 ,preemphasis_coeff=0.97 ,mel_floor=1.0 ,normalize_means=True ,normalize_vars=True ,return_attention_mask=False ,**kwargs ,):
        super().__init__(feature_size=feature_size ,sampling_rate=sampling_rate ,padding_value=padding_value ,**kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features( self ,one_waveform ):
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=False )
        else:
            window = window_function(window_length=self.sample_size ,name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale ,window=window ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=False ,preemphasis=self.preemphasis_coeff ,mel_filters=fbanks ,mel_floor=self.mel_floor ,log_mel="""log""" ,)
        return msfc_features.T
    def _normalize_one( self ,x ,input_length ,padding_value ):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x ,mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x ,std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self ,input_features ,attention_mask = None ):
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x ,n ,self.padding_value ) for x, n in zip(input_features ,lengths )]
    def __call__( self ,raw_speech ,padding = False ,max_length = None ,truncation = False ,pad_to_multiple_of = None ,return_attention_mask = None ,return_tensors = None ,sampling_rate = None ,**kwargs ,):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech ,np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech ,dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech ,np.ndarray ):
            raw_speech = np.asarray(raw_speech ,dtype=np.float64 )
        elif isinstance(raw_speech ,np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"""input_features""": features} )
        padded_inputs = self.pad(
            encoded_inputs ,padding=padding ,max_length=max_length ,truncation=truncation ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,**kwargs ,)
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0] ,list ):
            padded_inputs["""input_features"""] = [np.asarray(feature ,dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            padded_inputs["""attention_mask"""] = [np.asarray(array ,dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask ,dtype=np.int32 )
                if self._get_padding_strategies(padding ,max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["""input_features"""] = self.normalize(
                padded_inputs["""input_features"""] ,attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
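# A hedged usage sketch for the extractor defined above: the waveform is random noise,
# so this only demonstrates the call convention and the (num_frames, feature_size)
# output layout, not meaningful speech features.
def _example_feature_extraction():
    extractor = MCTCTFeatureExtractor(feature_size=80, sampling_rate=16_000)
    waveform = np.random.randn(16_000).astype(np.float64)  # one second of fake audio
    batch = extractor(waveform, sampling_rate=16_000)
    print(batch["input_features"][0].shape)  # (num_frames, 80)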
| 50 |
"""simple docstring"""
def odd_even_sort( input_list : list )-> list:
    '''simple docstring'''
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("""The sorted list is""")
    print(sorted_list)
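# A few self-checks for the routine above. Odd-even (brick) sort is a bubble-sort
# variant whose even-index and odd-index passes are independent, which is what makes
# it parallelizable; here it is simply exercised sequentially.
def _odd_even_sort_examples():
    assert odd_even_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert odd_even_sort([]) == []
    assert odd_even_sort([2, 1, 1, 2]) == [1, 1, 2, 2]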
| 438 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 20_48,
"AI-Sweden/gpt-sw3-350m": 20_48,
"AI-Sweden/gpt-sw3-1.6b": 20_48,
"AI-Sweden/gpt-sw3-6.7b": 20_48,
"AI-Sweden/gpt-sw3-20b": 20_48,
}
class GPTSw3Tokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file, do_lower_case=False, remove_space=False, keep_accents=False, pad_token=None, unk_token=None, eos_token=None, bos_token=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('name_or_path')
        if name_or_path is None:
            logger.warning(
                'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
                ' you are testing the model, this can safely be ignored')
            name_or_path = 'None'
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '<|endoftext|>' if eos_token is None else eos_token
        unk_token = '<unk>' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '<pad>' if pad_token is None else pad_token
            bos_token = '<s>' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'''[{"".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]''')
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self, d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ):
        """simple docstring"""
        return len(self.sp_model)
    def preprocess_text( self, text: str ):
        """simple docstring"""
        text = self.non_printing_characters_re.sub('', text)
        # Normalize whitespaces
        text = ''.join([char if char not in self.whitespaces else ' ' for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize('NFC', text)
        return text
    def _tokenize( self, text: str, **kwargs ):
        """simple docstring"""
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id( self, token: str ):
        """simple docstring"""
        return self.sp_model.PieceToId(token)
    def _convert_id_to_token( self, index: int ):
        """simple docstring"""
        return self.sp_model.IdToPiece(index)
    @staticmethod
    def clean_up_tokenization( out_string: str ):
        """simple docstring"""
        return out_string
    def convert_tokens_to_string( self, tokens: List[str] ):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def encode_fast( self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False ):
        """simple docstring"""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids
    def decode_fast( self, token_ids: Union[int, List[int]] ):
        """simple docstring"""
        return self.sp_model.decode(token_ids)
    def _build_conversation_input_ids( self, conversation: "Conversation" ):
        """simple docstring"""
        all_responses = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(all_responses) + f'''{self.bos_token}Bot:'''
        )
        return self.encode(text=prompt)
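# A hedged round-trip sketch for the tokenizer above; the checkpoint name comes from
# PRETRAINED_VOCAB_FILES_MAP in this file, and the call assumes network access to
# download the SentencePiece model.
def _example_gpt_sw3_roundtrip():
    tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    ids = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]
    print(tokenizer.decode(ids[0]))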
| 720 |
def jaro_winkler(str1: str , str2: str) -> float:
    def get_matched_characters(_str1: str , _str2: str) -> str:
        matched = []
        limit = min(len(_str1) , len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0 , i - limit))
            right = int(min(i + limit + 1 , len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f'''{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}'''
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1 , str2)
    matching_2 = get_matched_characters(str2 , str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1 , matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4] , str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
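# Two reference checks for the implementation above; 'martha'/'marhta' is the textbook
# Jaro-Winkler pair (jaro = 0.9444 with one transposition, plus a 3-character common
# prefix bonus of 0.1 * 3 * (1 - jaro)).
def _jaro_winkler_examples():
    assert jaro_winkler("hello", "hello") == 1.0
    assert round(jaro_winkler("martha", "marhta"), 4) == 0.9611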
| 557 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset( Dataset ):
    '''simple docstring'''
    def __init__( self , dataset , process , params ):
        """simple docstring"""
        self.dataset = dataset
        self.process = process
        self.params = params
    def __len__( self ):
        """simple docstring"""
        return len(self.dataset )
    def __getitem__( self , i ):
        """simple docstring"""
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class PipelineIterator( IterableDataset ):
    '''simple docstring'''
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        """simple docstring"""
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None
    def __len__( self ):
        """simple docstring"""
        return len(self.loader )
    def __iter__( self ):
        """simple docstring"""
        self.iterator = iter(self.loader )
        return self
    def loader_batch_item( self ):
        """simple docstring"""
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result
    def __next__( self ):
        """simple docstring"""
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator( PipelineIterator ):
    '''simple docstring'''
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        """simple docstring"""
        super().__init__(loader , infer , params )
    def __iter__( self ):
        """simple docstring"""
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self
    def __next__( self ):
        """simple docstring"""
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class PipelinePackIterator( PipelineIterator ):
    '''simple docstring'''
    def __iter__( self ):
        """simple docstring"""
        self.iterator = iter(self.loader )
        return self
    def __next__( self ):
        """simple docstring"""
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("""is_last""" )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("""is_last""" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("""is_last""" )
                accumulator.append(item )
        return accumulator
class KeyDataset( Dataset ):
    '''simple docstring'''
    def __init__( self , dataset , key ):
        """simple docstring"""
        self.dataset = dataset
        self.key = key
    def __len__( self ):
        """simple docstring"""
        return len(self.dataset )
    def __getitem__( self , i ):
        """simple docstring"""
        return self.dataset[i][self.key]
class KeyPairDataset( Dataset ):
    '''simple docstring'''
    def __init__( self , dataset , key1 , key2 ):
        """simple docstring"""
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2
    def __len__( self ):
        """simple docstring"""
        return len(self.dataset )
    def __getitem__( self , i ):
        """simple docstring"""
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 543 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ctrl""": 256,
}
CONTROL_CODES = {
"""Pregnancy""": 16_8629,
"""Christianity""": 7675,
"""Explain""": 10_6423,
"""Fitness""": 6_3440,
"""Saving""": 6_3163,
"""Ask""": 2_7171,
"""Ass""": 9_5985,
"""Joke""": 16_3509,
"""Questions""": 4_5622,
"""Thoughts""": 4_9605,
"""Retail""": 5_2342,
"""Feminism""": 16_4338,
"""Writing""": 1_1992,
"""Atheism""": 19_2263,
"""Netflix""": 4_8616,
"""Computing""": 3_9639,
"""Opinion""": 4_3213,
"""Alone""": 4_4967,
"""Funny""": 5_8917,
"""Gaming""": 4_0358,
"""Human""": 4088,
"""India""": 1331,
"""Joker""": 7_7138,
"""Diet""": 3_6206,
"""Legal""": 1_1859,
"""Norman""": 4939,
"""Tip""": 7_2689,
"""Weight""": 5_2343,
"""Movies""": 4_6273,
"""Running""": 2_3425,
"""Science""": 2090,
"""Horror""": 3_7793,
"""Confession""": 6_0572,
"""Finance""": 1_2250,
"""Politics""": 1_6360,
"""Scary""": 19_1985,
"""Support""": 1_2654,
"""Technologies""": 3_2516,
"""Teenage""": 6_6160,
"""Event""": 3_2769,
"""Learned""": 6_7460,
"""Notion""": 18_2770,
"""Wikipedia""": 3_7583,
"""Books""": 6665,
"""Extract""": 7_6050,
"""Confessions""": 10_2701,
"""Conspiracy""": 7_5932,
"""Links""": 6_3674,
"""Narcissus""": 15_0425,
"""Relationship""": 5_4766,
"""Relationships""": 13_4796,
"""Reviews""": 4_1671,
"""News""": 4256,
"""Translation""": 2_6820,
"""multilingual""": 12_8406,
}
def get_pairs( word ):
    """Return the set of adjacent symbol pairs found in a word (as tuples)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class CTRLTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
def __init__( self , _a , _a , _a="<unk>" , **_a ):
"""simple docstring"""
super().__init__(unk_token=_a , **_a )
with open(_a , encoding="""utf-8""" ) as vocab_handle:
lowerCamelCase = json.load(_a )
lowerCamelCase = {v: k for k, v in self.encoder.items()}
with open(_a , encoding="""utf-8""" ) as merges_handle:
lowerCamelCase = merges_handle.read().split("""\n""" )[1:-1]
lowerCamelCase = [tuple(merge.split() ) for merge in merges]
lowerCamelCase = dict(zip(_a , range(len(_a ) ) ) )
lowerCamelCase = {}
@property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """@@ """.join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        """simple docstring"""
        split_tokens = []
        words = re.findall(r"""\S+\n?""" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(""" """ ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = """ """.join(tokens ).replace("""@@ """ , """""" ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
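# A hedged illustration of the BPE merge loop implemented above, using a tiny
# hand-built rank table; the merges and the input word are invented for the example
# and are not taken from the real CTRL vocabulary.
def _example_bpe_merges():
    tok = CTRLTokenizer.__new__(CTRLTokenizer)  # bypass vocab/merges file loading for the sketch
    tok.bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
    tok.cache = {}
    # 'l o w</w>' -> 'lo w</w>' -> 'low</w>', then the '</w>' marker is stripped
    assert tok.bpe("low") == "low"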
| 543 | 1 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_input_output_texts( self, tokenizer ):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2 )
| 184 |
from __future__ import annotations
def is_9_pandigital( n: int ) -> bool:
    '''simple docstring'''
    s = str(n )
    return len(s ) == 9 and set(s ) == set('123456789' )
def solution() -> int | None:
    '''simple docstring'''
    for base_num in range(9_999 , 4_999 , -1 ):
        candidate = 100_002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1_002_003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None
if __name__ == "__main__":
print(f'''{solution() = }''')
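# Why 100002 and 1002003? Under the (assumed) Project-Euler-38-style reading of this
# file: for a 4-digit n, concatenating n with 2n equals n * 10**5 + 2n = 100002 * n,
# and for a 3-digit n, concatenating n, 2n and 3n equals n * 10**6 + 2n * 10**3 + 3n
# = 1002003 * n, so scanning n downward finds the largest pandigital product first.
def _check_concatenation_identity():
    n = 9_327
    assert int(str(n) + str(2 * n)) == 100_002 * n
    m = 192
    assert int(str(m) + str(2 * m) + str(3 * m)) == 1_002_003 * m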
| 184 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 5_12,
'roberta-large': 5_12,
'roberta-large-mnli': 5_12,
'distilroberta-base': 5_12,
'roberta-base-openai-detector': 5_12,
'roberta-large-openai-detector': 5_12,
}
class RobertaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = RobertaTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
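# A quick illustration of the special-token layout implemented above: for a sequence
# pair, RoBERTa uses <s> A </s> </s> B </s>, and every token type id is zero. The
# checkpoint download in from_pretrained is assumed to be available.
def _example_special_token_layout():
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
    ids = tokenizer.build_inputs_with_special_tokens([100, 101], [200])
    assert ids == [tokenizer.bos_token_id, 100, 101, tokenizer.eos_token_id, tokenizer.eos_token_id, 200, tokenizer.eos_token_id]
    assert tokenizer.create_token_type_ids_from_sequences([100, 101], [200]) == [0] * 7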
| 110 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')
prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 110 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a_ : List[Any] = False
class _snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion')
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy').images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy').images
        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_text2img( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion' , torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 444 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    freeze_encoder: bool = field(default=False , metadata={'''help''': '''Whether to freeze the encoder.'''} )
    freeze_embeds: bool = field(default=False , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    task: Optional[str] = field(
        default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
    max_source_length: Optional[int] = field(
        default=1024 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total sequence length for target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            '''help''': (
                '''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded. '''
                '''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
                '''during ``evaluate`` and ``predict``.'''
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            '''help''': (
                '''The maximum total sequence length for test target text after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
    n_val: Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
    n_test: Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
    src_lang: Optional[str] = field(default=None , metadata={'''help''': '''Source language id for translation.'''} )
    tgt_lang: Optional[str] = field(default=None , metadata={'''help''': '''Target language id for translation.'''} )
    eval_beams: Optional[int] = field(default=None , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def handle_metrics(split , metrics , output_dir):
    logger.info(F'''***** {split} metrics *****''')
    for key in sorted(metrics.keys()):
        logger.info(F''' {key} = {metrics[key]}''')
    save_json(metrics , os.path.join(output_dir , F'''{split}_results.json'''))
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , training_args)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args , p , None):
            assert hasattr(config , p), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config , p , getattr(training_args , p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset
# Get datasets
SCREAMING_SNAKE_CASE = (
dataset_class(
_UpperCAmelCase , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE = (
dataset_class(
_UpperCAmelCase , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
SCREAMING_SNAKE_CASE = (
dataset_class(
_UpperCAmelCase , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
SCREAMING_SNAKE_CASE = (
build_compute_metrics_fn(data_args.task , _UpperCAmelCase) if training_args.predict_with_generate else None
)
SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , data_collator=SeqaSeqDataCollator(
_UpperCAmelCase , _UpperCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores) , compute_metrics=_UpperCAmelCase , tokenizer=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE = {}
# Training
if training_args.do_train:
logger.info('*** Train ***')
SCREAMING_SNAKE_CASE = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
SCREAMING_SNAKE_CASE = train_result.metrics
SCREAMING_SNAKE_CASE = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _UpperCAmelCase , training_args.output_dir)
all_metrics.update(_UpperCAmelCase)
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json'))
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***')
SCREAMING_SNAKE_CASE = trainer.evaluate(metric_key_prefix='val')
SCREAMING_SNAKE_CASE = data_args.n_val
SCREAMING_SNAKE_CASE = round(metrics['val_loss'] , 4)
if trainer.is_world_process_zero():
handle_metrics('val' , _UpperCAmelCase , training_args.output_dir)
all_metrics.update(_UpperCAmelCase)
if training_args.do_predict:
logger.info('*** Predict ***')
SCREAMING_SNAKE_CASE = trainer.predict(test_dataset=_UpperCAmelCase , metric_key_prefix='test')
SCREAMING_SNAKE_CASE = test_output.metrics
SCREAMING_SNAKE_CASE = data_args.n_test
if trainer.is_world_process_zero():
SCREAMING_SNAKE_CASE = round(metrics['test_loss'] , 4)
handle_metrics('test' , _UpperCAmelCase , training_args.output_dir)
all_metrics.update(_UpperCAmelCase)
if training_args.predict_with_generate:
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = lmap(str.strip , _UpperCAmelCase)
write_txt_file(_UpperCAmelCase , os.path.join(training_args.output_dir , 'test_generations.txt'))
if trainer.is_world_process_zero():
save_json(_UpperCAmelCase , os.path.join(training_args.output_dir , 'all_results.json'))
return all_metrics
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
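# Illustrative invocation (not part of the original script); the model name and
# paths below are placeholders that only demonstrate the expected flags:
#
#   python finetune_trainer.py \
#       --model_name_or_path facebook/bart-base \
#       --data_dir ./wmt_en_ro \
#       --output_dir ./outputs \
#       --task translation \
#       --do_train --do_eval --predict_with_generate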
| 444 | 1 |
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
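# Illustrative behavior (not part of the original module):
# filenames_for_dataset_split("/data", "MyDataset", "train", filetype_suffix="arrow", shard_lengths=[100, 100])
# -> ["/data/my_dataset-train-00000-of-00002.arrow", "/data/my_dataset-train-00001-of-00002.arrow"]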
| 184 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
A_ = False
# Class and method names below are reconstructed from the call pattern;
# unittest only requires the test_* prefix for discovery.
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    # fast tests are not implemented for this pipeline yet
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 391 | 0 |
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """Find the minimum-cost path from the top-left to the bottom-right of a
    grid, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # accumulate costs along the first row
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
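# Illustrative example (not part of the original module):
# min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) returns 7
# via the path 1 -> 3 -> 1 -> 1 -> 1 (moving only right or down).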
| 165 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
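# Illustrative usage (not part of the original module): cap generation at 64
# tokens or 2 seconds of wall-clock time, whichever comes first.
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64), MaxTimeCriteria(max_time=2.0)])
# criteria(input_ids, scores) returns True once either criterion is met.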
| 165 | 1 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Return the Liouville lambda of `number`: -1 if `number` has an odd
    number of prime factors (counted with multiplicity), 1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
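# Illustrative values (not part of the original module):
# liouville_lambda(10) == 1   (10 = 2 * 5 has two prime factors)
# liouville_lambda(11) == -1  (11 is prime, a single prime factor)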
| 258 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
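# Note on the qkv split above (descriptive comment, not original code): the
# checkpoint stores one fused projection of shape (3 * dim, dim); rows [0:dim]
# hold the query weights, [dim:2*dim] the keys, and [2*dim:3*dim] the values.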
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
_A = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 258 | 1 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowercase_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 131 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    """Replace dotted indices (e.g. `layers.0`) with underscores (`layers_0`)."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
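# Illustrative behavior of rename_key (not part of the original module):
# rename_key("down_blocks.0.resnets.1.conv1.weight")
# -> "down_blocks_0.resnets_1.conv1.weight"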
| 131 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        # NOTE: the backbone/decoder attribute names below are reconstructed from
        # the values in the obfuscated source and may differ slightly from upstream.
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE: test method names below are reconstructed; unittest only requires
    # the test_* prefix for discovery.
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
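# Illustrative usage (not part of the original module):
# config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
# creates a config whose language adapters cover English and German.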
| 551 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None):
        # rescale to [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
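# Illustrative behavior (not part of the original module): with clusters at
# black and white, a dark pixel maps to cluster 0:
# color_quantize(np.array([[10.0, 10.0, 10.0]]), np.array([[0, 0, 0], [255, 255, 255]])) -> array([0])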
| 701 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns a list of the first n odd composite numbers which do not
    satisfy Goldbach's other conjecture (prime + 2 * square)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
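# Known result for Project Euler problem 46: the smallest odd composite that
# cannot be written as prime + 2 * square is 5777, so solution() returns 5777.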
| 9 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 44 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
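# Illustrative usage (not part of the original module), with a real InstructBLIP checkpoint:
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# `inputs` then contains pixel_values, input_ids/attention_mask, and the
# qformer_input_ids/qformer_attention_mask produced by the QFormer tokenizer.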
| 135 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
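# Editor's illustrative check (not in the original file): on a 200x100 image,
# the pixel box (20, 10, 40, 30) maps into the 0-1000 coordinate space as below.
assert normalize_box([20, 10, 40, 30], 200, 100) == [100, 100, 200, 300]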
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    """Image processor for document images: resizing, rescaling, normalization and optional Tesseract OCR."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, apply_ocr=True, ocr_lang=None, tesseract_config="", **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, apply_ocr=None, ocr_lang=None, tesseract_config=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
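# Editor's usage sketch (assumes Tesseract + pytesseract are installed; the sample
# image is synthetic, so OCR will simply find no words on it).
if __name__ == "__main__":
    import numpy as np

    image_processor = LayoutLMv3ImageProcessor()
    dummy = np.zeros((64, 64, 3), dtype=np.uint8)
    batch = image_processor.preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape, batch["words"], batch["boxes"])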
| 710 |
"""simple docstring"""
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
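# Editor's illustrative checks (not in the original): 4 km = 0.004 Mm and
# 1 Gm = 1_000_000 km, following the exponent table above.
assert abs(length_conversion(4, "kilometer", "megametre") - 0.004) < 1e-12
assert length_conversion(1, "gigametre", "kilometer") == 1_000_000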
if __name__ == "__main__":
from doctest import testmod
testmod()
| 248 | 0 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664_694
        config.cell_selection_preference = 0.207_951
        config.huber_loss_delta = 0.121_194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_352_513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4_519
        config.cell_selection_preference = 0.903_421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763_141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
) | 137 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 137 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self) -> None:
        """Checks that an optimizer wrapped by `Accelerator.prepare` can be pickled."""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 704 |
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)
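# Editor's illustrative check (not in the original): 13195 = 5 * 7 * 13 * 29, so
# its largest prime factor is 29; the Project Euler default input yields 6857.
assert solution(13195) == 29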
if __name__ == "__main__":
print(F'{solution() = }')
| 414 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype)
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
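# Editor's usage sketch (assumes network access, a CUDA device and the public
# "facebook/DiT-XL-2-256" checkpoint; none of these are referenced in this file).
if __name__ == "__main__":
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    pipe = pipe.to("cuda")
    class_ids = pipe.get_label_ids(["white shark", "umbrella"])
    images = pipe(class_labels=class_ids, num_inference_steps=25).images
    images[0].save("dit_sample.png")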
| 95 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype)
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
| 95 | 1 |
"""simple docstring"""
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "MT5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 690 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class A__( unittest.TestCase ):
    def setUp(self):
        # reset the remote-code timeout so that the tests below do not wait for user input
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(image_processor, CLIPImageProcessor)
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : str ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = CLIPConfig()
            # Create a dummy config file with image_processor_type
__SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE ).to_dict()
config_dict.pop('''image_processor_type''' )
__SCREAMING_SNAKE_CASE = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE )
# save in new folder
model_config.save_pretrained(__SCREAMING_SNAKE_CASE )
config.save_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
# make sure private variable is not incorrectly saved
__SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier''' ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''clip-base''' )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''' )
def _a ( self : Dict ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def _a ( self : int ) -> Any:
"""simple docstring"""
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def _a ( self : Optional[Any] ) -> str:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE ):
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''preprocessor_config.json'''
__SCREAMING_SNAKE_CASE = Path(__SCREAMING_SNAKE_CASE ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w''' ) )
__SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _a ( self : int ) -> List[Any]:
"""simple docstring"""
class A__( __magic_name__ ):
lowerCAmelCase = True
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE )
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 690 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
])
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original TrOCR weights into our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311])
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170])
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210])
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535])

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 564 | """simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
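# Editor's illustrative check (not in the original): the graph of f(x) = x from
# 0 to 1 is a straight segment, so its arc length is sqrt(2) ~= 1.4142.
assert abs(line_length(lambda x: x, 0, 1) - math.sqrt(2)) < 1e-9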
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")

    i = 10
    while i <= 100_000:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10 | 564 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub so that references below do not fail when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
@slow
@require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2)
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
    def test_small_model_tf(self):
'''simple docstring'''
pass
| 705 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 652 | 0 |
def join(separator: str, separated: list[str]) -> str:
    """Concatenates the given strings with the separator, without a trailing separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
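# Editor's illustrative check (not in the original): mirrors str.join for plain strings.
assert join("_", ["a", "b", "c"]) == "a_b_c"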
if __name__ == "__main__":
from doctest import testmod
testmod()
| 491 | '''simple docstring'''
import math
def proth(number: int) -> int:
    """Returns the number-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
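# Editor's illustrative check (not in the original): Proth numbers have the form
# k * 2**n + 1 with odd k < 2**n; the sequence starts 3, 5, 9, 13, 17, 25.
assert [proth(n) for n in range(1, 7)] == [3, 5, 9, 13, 17, 25]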
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
try:
            value = proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
| 390 | 0 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes numbers are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Returns the nth prime number (Project Euler problem 7)."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
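# Editor's illustrative check (not in the original): the 6th prime is 13
# (2, 3, 5, 7, 11, 13); the default nth=10001 reproduces the Project Euler input.
assert solution(6) == 13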
if __name__ == "__main__":
print(F'''{solution() = }''')
| 709 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is kept in asdict() output even when it still has its default value
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 122 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 65 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 617 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
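# Editor's illustrative note (not in the original): the four non-trivial
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98; their product is
# 1/100, so the answer (denominator in lowest terms) is 100.
assert solution() == 100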
if __name__ == "__main__":
print(solution())
| 721 |
def sum_of_proper_divisors(input_num: int) -> int:
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
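# Editor's illustrative check (not in the original): 6 is a perfect number,
# since its proper divisors 1 + 2 + 3 sum to 6.
assert sum_of_proper_divisors(6) == 6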
if __name__ == "__main__":
import doctest
doctest.testmod()
| 567 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument.")

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@staticmethod
def __magic_name__ ( lowerCAmelCase__ ) -> List[str]:
__magic_name__ : Any = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
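# Illustration (not in the original file): the default attention_types value
# [[["global", "local"], 12]] expands to a 24-entry list that alternates the
# two attention types, one entry per layer:
#
#     >>> GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])[:4]
#     ['global', 'local', 'global', 'local']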
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
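# Rough equivalence check (my example, not from the source): for a tensor `t`
# of shape (2, 8), custom_unfold(t, dimension=1, size=2, step=2) should match
# t.unfold(1, 2, 2) and return a tensor of shape (2, 4, 2).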
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Returns the largest divisor of seq_length below window_size and the resulting block count (ONNX-friendly)."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 324 |
import math
def jump_search(arr: list, x: int) -> int:
    """Search sorted `arr` for `x` with jump search; return its index or -1."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
| 324 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
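# Behavior sketch (illustrative, not part of the original file): once the lazy
# module is installed in sys.modules, importing this package stays cheap; a
# heavy submodule such as modeling_trocr is only imported when an attribute
# like `TrOCRForCausalLM` is first accessed.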
| 712 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["GLPNFeatureExtractor"]
__lowerCamelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 536 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline using any `AutoModelForDepthEstimation`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
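# A minimal usage sketch (the model checkpoint here is an example choice, not
# pinned by this file):
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")   # PIL image
#     tensor = result["predicted_depth"]  # raw depth tensor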
| 541 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
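# Illustration (mine, not from the source): given the WordPiece tokens of
# "身高180" and the LTP word set {"身高"}, the trailing character of the whole
# word is marked as a continuation piece:
#
#     >>> add_sub_symbol(["身", "高", "180"], {"身高"})
#     ['身', '##高', '180']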
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
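# Typical invocation (a sketch; the script file name is assumed, the flag
# values are just the defaults defined above):
#
#     python run_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#         --ltp ./resources/ltp --bert ./resources/robert \
#         --save_path ./resources/ref.txt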
| 541 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """
    Configuration for training model.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """
    Configuration for evaluating model.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """
    Configuration for running evaluation on HumanEval dataset.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Random seed used for evaluation."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """
    Configuration for preprocessing data.
    """

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """
    Configuration for tokenizer training.
    """

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    vocab_size: Optional[int] = field(default=200_000, metadata={"help": "Number of examples to train tokenizer on."})
    n_examples: Optional[int] = field(
        default=32_768, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """
    Configuration for data pretokenization.
    """

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    """
    Configuration for initializing new model.
    """

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
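# Typical wiring (a sketch of how such dataclasses are usually consumed; the
# accompanying training script is not shown in this file):
#
#     from transformers import HfArgumentParser
#
#     parser = HfArgumentParser(TrainingArguments)
#     args = parser.parse_args_into_dataclasses()[0]
#     print(args.learning_rate)  # 2e-4 unless overridden on the CLI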
| 477 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowercase__ :
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
_UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
_UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCamelCase_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCAmelCase = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
_UpperCAmelCase = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
] , mid_block_type='UNetMidBlock2DSimpleCrossAttn' , up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='text' , addition_embed_type_num_heads=2 , cross_attention_norm='group_norm' , resnet_time_scale_shift='scale_shift' , act_fn='gelu' , class_embed_type='timestep' , mid_block_scale_factor=1.414 , time_embedding_act_fn='gelu' , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , thresholding=snake_case , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='epsilon' , variance_type='learned_range' , )
torch.manual_seed(0 )
_UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule='squaredcos_cap_v2' , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
_UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = inputs['prompt']
_UpperCAmelCase = inputs['generator']
_UpperCAmelCase = inputs['num_inference_steps']
_UpperCAmelCase = inputs['output_type']
if "image" in inputs:
_UpperCAmelCase = inputs['image']
else:
_UpperCAmelCase = None
if "mask_image" in inputs:
_UpperCAmelCase = inputs['mask_image']
else:
_UpperCAmelCase = None
if "original_image" in inputs:
_UpperCAmelCase = inputs['original_image']
else:
_UpperCAmelCase = None
_UpperCAmelCase , _UpperCAmelCase = pipe.encode_prompt(snake_case )
# inputs with prompt converted to embeddings
_UpperCAmelCase = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
_UpperCAmelCase = image
if mask_image is not None:
_UpperCAmelCase = mask_image
if original_image is not None:
_UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(snake_case , snake_case , snake_case )
_UpperCAmelCase = pipe(**snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case )
_UpperCAmelCase = self.pipeline_class.from_pretrained(snake_case )
pipe_loaded.to(snake_case )
pipe_loaded.set_progress_bar_config(disable=snake_case )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(snake_case , snake_case ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = inputs['generator']
_UpperCAmelCase = inputs['num_inference_steps']
_UpperCAmelCase = inputs['output_type']
# inputs with prompt converted to embeddings
_UpperCAmelCase = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
_UpperCAmelCase = image
if mask_image is not None:
_UpperCAmelCase = mask_image
if original_image is not None:
_UpperCAmelCase = original_image
_UpperCAmelCase = pipe_loaded(**snake_case )[0]
_UpperCAmelCase = np.abs(to_np(snake_case ) - to_np(snake_case ) ).max()
self.assertLess(snake_case , 1E-4 )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(snake_case )
_UpperCAmelCase = self.pipeline_class.from_pretrained(snake_case )
pipe_loaded.to(snake_case )
pipe_loaded.set_progress_bar_config(disable=snake_case )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe_loaded(**snake_case )[0]
_UpperCAmelCase = np.abs(to_np(snake_case ) - to_np(snake_case ) ).max()
self.assertLess(snake_case , 1E-4 )
| 573 | 0 |
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 367 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 367 | 1 |
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements the tanh activation function, (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
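# For reference (example values, mine): algebraically (2 / (1 + e^(-2x))) - 1
# equals tanh(x), so the function should agree with np.tanh, e.g.
# tangent_hyperbolic(np.array([0.0])) -> array([0.]), and large positive inputs
# saturate toward 1.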
if __name__ == "__main__":
import doctest
doctest.testmod()
| 251 | '''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 251 | 1 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
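# Example values (mine, assuming the definition above): positive inputs pass
# through unchanged, negative inputs are squashed toward -alpha, e.g. with
# alpha=1.0 the input -2 maps to exp(-2) - 1 ~= -0.8647.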
if __name__ == "__main__":
import doctest
doctest.testmod()
| 526 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384', device_map='auto')

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
| 526 | 1 |
def solution():
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
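# Sanity check (well-known Project Euler 19 result): solution() is expected to
# return 171.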
| 203 |
__lowercase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
__lowercase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 203 | 1 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return an activation module for the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'Unsupported activation function: {act_fn}')
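# Usage is direct given the mapping above, e.g.:
#
#     act = get_activation("silu")  # returns nn.SiLU()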
| 398 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 398 | 1 |
def nand_gate(input_a: int, input_b: int) -> int:
    """Return the NAND of two binary inputs: 1 unless both inputs are 1."""
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate() -> None:
    """Exhaustively check the NAND truth table."""
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 27 |
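# NAND is functionally complete, so the other basic gates can be composed from the
# nand_gate helper above; a minimal sketch:
def not_gate(input_a: int) -> int:
    return nand_gate(input_a, input_a)

def and_gate(input_a: int, input_b: int) -> int:
    return not_gate(nand_gate(input_a, input_b))

def or_gate(input_a: int, input_b: int) -> int:
    return nand_gate(not_gate(input_a), not_gate(input_b))

assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0
assert or_gate(0, 0) == 0 and or_gate(1, 0) == 1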
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
SCREAMING_SNAKE_CASE__ = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
SCREAMING_SNAKE_CASE__ = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
SCREAMING_SNAKE_CASE__ = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
SCREAMING_SNAKE_CASE__ = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    def _info( self : List[str]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare( self : Any , dl_manager : Any):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').")
            self.config_name = "bleurt-base-128"
        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path , checkpoint_name))
    def _compute( self : Union[str, Any] , predictions : List[str] , references : Union[str, Any]):
        scores = self.scorer.score(references=references , candidates=predictions)
        return {"scores": scores}
| 631 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model ) ->int:
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir , metric ) ->ModelCheckpoint:
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"val_{metric}" , mode="max" , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric , patience ) ->EarlyStopping:
    return EarlyStopping(
        monitor=F"val_{metric}" , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback(pl.Callback ):
    """simple docstring"""
    def on_batch_end( self , trainer , pl_module ):
        lrs = {F"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs( self , trainer , pl_module , type_path , save_generations=True ):
        logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F"{key}: {val:.6f}\n"
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
    @rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
    @rank_zero_only
    def on_test_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
    @rank_zero_only
    def on_validation_end( self , trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 718 |
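# A minimal wiring sketch for the callbacks above, assuming the LightningModule
# later passed to trainer.fit exposes the hparams.output_dir / metrics attributes
# the callbacks expect:
import pytorch_lightning as pl

def build_trainer(output_dir: str, metric: str = "rouge2", patience: int = 3) -> pl.Trainer:
    return pl.Trainer(
        callbacks=[
            get_checkpoint_callback(output_dir, metric),
            get_early_stopping_callback(metric, patience),
            Seq2SeqLoggingCallback(),
        ]
    )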
from __future__ import annotations
def ohms_law(voltage: float , current: float , resistance: float ) ->dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592 | 0 |
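# A short usage sketch for ohms_law above: pass exactly one zero and the missing
# quantity is solved for.
print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}
print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}
print(ohms_law(voltage=12, current=0, resistance=4))  # {'current': 3.0}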
"""simple docstring"""
from string import ascii_uppercase
__A = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int , base: int ) -> str:
    if isinstance(num , float ):
        raise TypeError("int() can't convert non-string with explicit base" )
    if num < 0:
        raise ValueError("parameter must be positive int" )
    if isinstance(base , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if isinstance(base , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if base in (0, 1):
        raise ValueError("base must be >= 2" )
    if base > 3_6:
        raise ValueError("base must be <= 36" )
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num , base )
        if base >= 1_1 and 9 < mod < 3_6:
            actual_value = ALPHABET_VALUES[str(mod )]
        else:
            actual_value = str(mod )
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(div )
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 346 |
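# A brief sanity sketch for decimal_to_any above: results should round-trip
# through Python's built-in int(value, base) parser.
assert decimal_to_any(255, 16) == "FF"
assert int(decimal_to_any(255, 16), 16) == 255
assert decimal_to_any(7, 2) == "111"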
"""simple docstring"""
from __future__ import annotations
__A = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid , init , goal , cost , heuristic , ) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError("Algorithm is unable to find solution" )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 346 | 1 |
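# The heuristic built in the __main__ block above is the Manhattan distance
# |i - goal_row| + |j - goal_col|; a small stand-alone sketch of the same idea:
def manhattan_heuristic(grid: list[list[int]], goal: list[int]) -> list[list[int]]:
    return [
        [abs(i - goal[0]) + abs(j - goal[1]) for j in range(len(grid[0]))]
        for i in range(len(grid))
    ]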
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"vocab_file": "spiece.model"}
UpperCamelCase__ = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
UpperCamelCase__ = {"bert_for_seq_generation": 5_12}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Tuple = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[int] = []
lowercase__ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : Dict="<s>" , lowercase : Optional[Any]="</s>" , lowercase : str="<unk>" , lowercase : Dict="<pad>" , lowercase : Tuple="<::::>" , lowercase : Optional[Dict[str, Any]] = None , **lowercase : List[str] , ) -> None:
"""simple docstring"""
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sep_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
@property
def snake_case__ ( self : Any ) -> str:
"""simple docstring"""
return self.sp_model.get_piece_size()
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : str , lowercase : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self : Optional[int] , lowercase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowercase , out_type=lowercase )
def snake_case__ ( self : Tuple , lowercase : Tuple ) -> Dict:
"""simple docstring"""
return self.sp_model.piece_to_id(lowercase )
def snake_case__ ( self : int , lowercase : int ) -> str:
"""simple docstring"""
__lowercase = self.sp_model.IdToPiece(lowercase )
return token
def snake_case__ ( self : List[Any] , lowercase : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = []
__lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase ) + token
__lowercase = []
else:
current_sub_tokens.append(lowercase )
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def snake_case__ ( self : Optional[int] , lowercase : str , lowercase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , """wb""" ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
| 700 |
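# A hedged usage sketch: the class above corresponds to transformers'
# BertGenerationTokenizer (its method names are obfuscated here), so the library
# class is used as a readable stand-in. Requires the sentencepiece package.
from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
ids = tokenizer("Hello world").input_ids  # SentencePiece token ids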
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """esm"""
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def UpperCAmelCase__ ( ) -> List[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 634 | 0 |
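# A minimal construction sketch; the configuration above mirrors transformers'
# EsmConfig, so the library class is used as a readable stand-in. Setting
# is_folding_model=True triggers the EsmFoldConfig / default-vocabulary branch.
from transformers import EsmConfig

config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6)
fold_config = EsmConfig(is_folding_model=True)  # logs notices about default esmfold_config and vocab_list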
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
class _snake_case ( lowerCamelCase ):
"""simple docstring"""
lowerCamelCase_ = ['''pixel_values''']
def __init__( self , a = True , a = None , a = None , a = PILImageResampling.BILINEAR , a = True , a = 1 / 2_5_5 , a = True , a = None , a = None , **a , ) -> None:
"""simple docstring"""
super().__init__(**a )
_A = size if size is not None else {'''shortest_edge''': 3_8_4}
_A = get_size_dict(a , default_to_square=a )
_A = do_resize
_A = size
# Default value set here for backwards compatibility where the value in config is None
_A = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
_A = resample
_A = do_rescale
_A = rescale_factor
_A = do_normalize
_A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self , a , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ) -> np.ndarray:
"""simple docstring"""
_A = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
_A = size['''shortest_edge''']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
_A = int(shortest_edge / crop_pct )
_A = get_resize_output_image_size(a , size=a , default_to_square=a )
_A = resize(image=a , size=a , resample=a , data_format=a , **a )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=a , size=(shortest_edge, shortest_edge) , data_format=a , **a )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
a , size=(shortest_edge, shortest_edge) , resample=a , data_format=a , **a )
def lowercase_ ( self , a , a , a = None , **a , ) -> int:
"""simple docstring"""
return rescale(a , scale=a , data_format=a , **a )
def lowercase_ ( self , a , a , a , a = None , **a , ) -> np.ndarray:
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
def lowercase_ ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> PIL.Image.Image:
"""simple docstring"""
_A = do_resize if do_resize is not None else self.do_resize
_A = crop_pct if crop_pct is not None else self.crop_pct
_A = resample if resample is not None else self.resample
_A = do_rescale if do_rescale is not None else self.do_rescale
_A = rescale_factor if rescale_factor is not None else self.rescale_factor
_A = do_normalize if do_normalize is not None else self.do_normalize
_A = image_mean if image_mean is not None else self.image_mean
_A = image_std if image_std is not None else self.image_std
_A = size if size is not None else self.size
_A = get_size_dict(a , default_to_square=a )
_A = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_A = [to_numpy_array(a ) for image in images]
if do_resize:
_A = [self.resize(image=a , size=a , crop_pct=a , resample=a ) for image in images]
if do_rescale:
_A = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
_A = [self.normalize(image=a , mean=a , std=a ) for image in images]
_A = [to_channel_dimension_format(a , a ) for image in images]
_A = {'''pixel_values''': images}
        return BatchFeature(data=a , tensor_type=a )
 | 317 |
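# A hedged usage sketch; the processor above matches transformers'
# ConvNextImageProcessor (crop_pct resizing below 384, plain warp at 384 and
# above), so the library class is used as a readable stand-in.
import numpy as np
from transformers import ConvNextImageProcessor

processor = ConvNextImageProcessor(size={"shortest_edge": 384})
image = np.zeros((512, 640, 3), dtype=np.uint8)
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384)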
import math
def decimal_to_octal(num: int ) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter ) ))
        counter += 1
        num = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F'''0o{int(octal )}'''
def main() -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(216 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(512 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
    main()
 | 317 | 1 |
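# A quick sanity sketch: decimal_to_octal above should agree with Python's
# built-in oct() for non-negative integers.
for value in (2, 8, 65, 216, 512):
    assert decimal_to_octal(value) == oct(value)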
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a_ :int = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
a_ :List[str] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
a_ :Dict = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self : Optional[Any] ) ->Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ), id='references' ),
} ), codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'], reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
], )
    def _compute( self : Any, predictions, references, max_order=4, smooth=False ) ->Tuple:
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 243 |
import random
def rabin_miller(num: int ) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num(num: int ) -> bool:
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime(keysize: int = 1_0_2_4 ) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 243 | 1 |
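# A quick usage sketch for the primality helpers above: rabin_miller works on the
# decomposition num - 1 = 2**t * s (s odd), and generate_large_prime draws
# keysize-bit candidates until one passes.
assert is_prime_low_num(97)
assert not is_prime_low_num(91)  # 91 = 7 * 13
candidate = generate_large_prime(keysize=128)
assert candidate.bit_length() == 128 and is_prime_low_num(candidate)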
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""vocab_file""": """spiece.model"""}
lowercase__ = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowercase__ = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
lowercase__ = """▁"""
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self , lowercase , lowercase="</s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase=100 , lowercase=None , lowercase = None , lowercase=True , **lowercase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_lowerCamelCase : List[str] = [F'''<extra_id_{i}>''' for i in range(lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCamelCase : Any = len(set(filter(lambda lowercase : bool('extra_id' in str(lowercase ) ) , lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
_lowerCamelCase : int = legacy
_lowerCamelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , extra_ids=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase , **lowercase , )
_lowerCamelCase : Optional[Any] = vocab_file
_lowerCamelCase : Optional[int] = extra_ids
_lowerCamelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
@staticmethod
def A_ ( lowercase , lowercase , lowercase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_lowerCamelCase : Optional[Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowercase , )
return max_model_length
@property
def A_ ( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def A_ ( self ):
_lowerCamelCase : str = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self , lowercase , lowercase = None , lowercase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(lowercase )) + [1]
return ([0] * len(lowercase )) + [1] + ([0] * len(lowercase )) + [1]
def A_ ( self ):
return list(
set(filter(lambda lowercase : bool(re.search(r'<extra_id_\d+>' , lowercase ) ) is not None , self.additional_special_tokens ) ) )
def A_ ( self ):
return [self._convert_token_to_id(lowercase ) for token in self.get_sentinel_tokens()]
def A_ ( self , lowercase ):
if len(lowercase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : int = self._add_eos_if_not_present(lowercase )
if token_ids_a is None:
return token_ids_a
else:
_lowerCamelCase : Any = self._add_eos_if_not_present(lowercase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_lowerCamelCase : Dict = self.__dict__.copy()
_lowerCamelCase : Dict = None
return state
def __setstate__( self , lowercase ):
_lowerCamelCase : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCamelCase : Union[str, Any] = {}
_lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A_ ( self , lowercase , **lowercase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_lowerCamelCase : List[Any] = SPIECE_UNDERLINE + text.replace(lowercase , ' ' )
return super().tokenize(lowercase , **lowercase )
def A_ ( self , lowercase , **lowercase ):
if not self.legacy:
_lowerCamelCase : Optional[int] = text.startswith(lowercase )
if is_first:
_lowerCamelCase : List[Any] = text[1:]
_lowerCamelCase : int = self.sp_model.encode(lowercase , out_type=lowercase )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(lowercase ):
_lowerCamelCase : List[Any] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def A_ ( self , lowercase ):
if token.startswith('<extra_id_' ):
_lowerCamelCase : int = re.match(r'<extra_id_(\d+)>' , lowercase )
_lowerCamelCase : Dict = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(lowercase )
def A_ ( self , lowercase ):
if index < self.sp_model.get_piece_size():
_lowerCamelCase : Optional[Any] = self.sp_model.IdToPiece(lowercase )
else:
_lowerCamelCase : Dict = F'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def A_ ( self , lowercase ):
_lowerCamelCase : List[Any] = []
_lowerCamelCase : Dict = ''
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase ) + token
_lowerCamelCase : Tuple = True
_lowerCamelCase : int = []
else:
current_sub_tokens.append(lowercase )
_lowerCamelCase : Any = False
out_string += self.sp_model.decode(lowercase )
return out_string.strip()
def A_ ( self , lowercase , lowercase = None ):
if not os.path.isdir(lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCamelCase : str = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , 'wb' ) as fi:
_lowerCamelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase )
        return (out_vocab_file,)
 | 630 |
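# A hedged usage sketch; the class above mirrors transformers' T5Tokenizer
# (sentinel tokens <extra_id_N>, automatic </s> appending), so the library class
# is used as a readable stand-in. Requires the sentencepiece package.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
ids = tokenizer("translate English to German: Hello").input_ids
assert ids[-1] == tokenizer.eos_token_id  # </s> appended by the eos helper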
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=64 , lowercase=5 , lowercase=4 , lowercase=64 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ):
_lowerCamelCase : List[str] = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : List[Any] = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Optional[int] = type_sequence_label_size
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : Tuple = num_labels
_lowerCamelCase : int = num_choices
_lowerCamelCase : int = scope
def A_ ( self ):
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def A_ ( self ):
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_input_mask:
_lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = None
_lowerCamelCase : Any = None
if self.use_labels:
_lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = MPNetModel(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Tuple = model(lowercase , lowercase )
_lowerCamelCase : Tuple = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : str = MPNetForQuestionAnswering(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : str = model(
lowercase , attention_mask=lowercase , start_positions=lowercase , end_positions=lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = self.num_labels
_lowerCamelCase : int = MPNetForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : List[str] = self.num_choices
_lowerCamelCase : List[str] = MPNetForMultipleChoice(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase : Any = model(
lowercase , attention_mask=lowercase , labels=lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
_lowerCamelCase : Any = self.num_labels
_lowerCamelCase : Union[str, Any] = MPNetForTokenClassification(config=lowercase )
model.to(lowercase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self ):
_lowerCamelCase : Dict = self.prepare_config_and_inputs()
((_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase), (_lowerCamelCase)) : List[Any] = config_and_inputs
_lowerCamelCase : str = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = True
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = MPNetModelTester(self )
_lowerCamelCase : List[Any] = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase )
def A_ ( self ):
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Optional[Any] = MPNetModel.from_pretrained('microsoft/mpnet-base' )
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCamelCase : Any = model(lowercase )[0]
_lowerCamelCase : Dict = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase )
_lowerCamelCase : List[Any] = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) )
 | 630 | 1 |
'''simple docstring'''
A : Optional[Any] = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 273 |
'''simple docstring'''
A : str = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
A : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def encode(word: str ) ->str:
    encoded = ''
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception('encode() accepts only letters of the alphabet and spaces' )
    return encoded
def decode(coded: str ) ->str:
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
    decoded = ''
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 273 | 1 |
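# A round-trip sketch for the Baconian cipher helpers above:
message = "hello world"
assert decode(encode(message)) == message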
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_A: Optional[int] = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A: Union[str, Any] = ["""DeiTFeatureExtractor"""]
_A: Optional[int] = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 126 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
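# Sanity check, verified by hand: for [[1, 2], [3, 4]], transpose() gives
# [[1, 3], [2, 4]] and rotate_90() (counterclockwise) gives [[2, 4], [1, 3]].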
def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 9 | 0 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
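# Note: with --line_by_line each input line becomes one example
# (LineByLineTextDataset); otherwise TextDataset concatenates the corpus and
# slices it into fixed windows of `block_size` tokens.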
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 702 | """simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
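# For positive integers the recursion reduces to a factorial shift,
# gamma(n) == (n - 1)!, e.g. gamma(5) == 4 * 3 * 2 * 1 == 24; half-integer
# inputs bottom out at gamma(0.5) == sqrt(pi).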
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
print(F"gamma({num}) = {gamma(num)}")
print("\nEnter 0 to exit...")
| 674 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    \'scores\': List of scores.\nExamples:\n\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> bleurt = datasets.load_metric(\"bleurt\")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results[\"scores\"]])\n    [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
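# The metric's config_name picks one of the checkpoints above, e.g.
# datasets.load_metric("bleurt", "BLEURT-20"); "default" falls back to
# bleurt-base-128 (see _download_and_prepare below).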
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"""
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores} | 249 | """simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 277 | 0 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
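# Example (in-place): seq = [9, 3, 7, 1]; slowsort(seq); seq == [1, 3, 7, 9].
# Slowsort is a deliberately pessimal "multiply and surrender" algorithm: sort
# both halves recursively, move the maximum to the end, then re-sort the rest.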
if __name__ == "__main__":
from doctest import testmod
testmod()
| 704 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list) -> bool:
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
_lowerCamelCase = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
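    # The components above are deliberately tiny, randomly initialized stand-ins
    # for real checkpoints; the fixed torch.manual_seed(0) keeps them (and hence
    # the expected output slices below) deterministic across runs.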
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=low_res_img,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
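# The integration tests above follow the documented two-stage workflow: generate
# latents with a base Stable Diffusion pipeline (output_type="latent"), then feed
# them (or a 512px image) to the x2 latent upscaler with guidance_scale=0.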
| 190 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: list):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: list, chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_len = min(end - start, max_word_len)
            for i in range(max_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
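# Illustration (hypothetical inputs): with chinese_word_set = {"中国人"},
# add_sub_symbol(["中", "国", "人"], chinese_word_set) returns
# ["中", "##国", "##人"], marking non-initial subwords of a whole word so the
# whole-word-mask collator can mask them together.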
def prepare_ref(lines: list, ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
| 92 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
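# XNLI is a 3-way NLI task (entailment / neutral / contradiction) with parallel
# splits in 15 languages; label names are read from the dataset features below.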
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 344 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 5_1_2,
}
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : List[Any] = BlenderbotSmallTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="<|endoftext|>" , SCREAMING_SNAKE_CASE_="<|endoftext|>" , SCREAMING_SNAKE_CASE_="<|endoftext|>" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=SCREAMING_SNAKE_CASE_ , merges=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ , ) , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase__ : Union[str, Any] = add_prefix_space
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ : Optional[int] = [self.sep_token_id]
lowercase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
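# Possible usage sketch (illustrative, not part of the original file; assumes
# network access to fetch the hosted vocab.json/merges.txt):
if __name__ == "__main__":
    tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
    ids = tokenizer("hello there, friend")["input_ids"]
    print(ids)
    print(tokenizer.decode(ids))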
| 495 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
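# A hypothetical concrete command, sketched for illustration only (the command
# name and behavior are invented). In the real CLI, `parser` is the sub-parsers
# action returned by ArgumentParser.add_subparsers(), which provides add_parser():
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello", help="print a greeting")
        # Stash a factory so the dispatcher can instantiate the chosen command:
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("Hello from the CLI")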
| 495 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so submodule imports are deferred until first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
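# Usage sketch of the lazy-import machinery above (illustrative; assumes this
# file is installed as transformers/models/longt5/__init__.py, as in the
# transformers package):
#
#     from transformers.models.longt5 import LongT5Config
#     config = LongT5Config()  # _LazyModule imports configuration_longt5 on this first access;
#                              # the torch/flax modeling files stay unimported until needed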
| 96 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    """Configuration class to store the configuration of a Swinv2 model."""

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
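# Quick sanity sketch (illustrative, not part of the original file): with the
# defaults above, the three downsampling stages double embed_dim 96 -> 192 -> 384 -> 768.
if __name__ == "__main__":
    config = Swinv2Config()
    assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1) == 768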
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 42 | 1 |